mirror of https://github.com/tensorflow/haskell.git synced 2024-06-02 19:13:34 +02:00

One can never be --pedantic enough.

Greg Steuck 2016-11-17 17:52:17 -08:00
parent f2aff81401
commit 15060298ed
2 changed files with 30 additions and 19 deletions
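
For context, --pedantic is Stack's build flag, which compiles with -Wall -Werror so that every warning fails the build. Below is a minimal sketch (hypothetical module and names, not from this repository) of the warning classes this diff silences: missing top-level type signatures, defaulted numeric types, and shadowed bindings.

{-# OPTIONS_GHC -Wall #-}
module PedanticSketch where

import Data.Int (Int64)

-- Without this signature, -Wall reports
-- "Top-level binding with no type signature".
capDim :: Int -> Int64
capDim rank = ceiling doubleMaxDim
  where
    -- Pinning the intermediate type avoids
    -- "Defaulting the following constraint(s) to type 'Double'"
    -- (cf. the doubleMaxDim change in the diff below).
    doubleMaxDim :: Double
    doubleMaxDim = 100 ** (1 / fromIntegral rank)

-- Renaming the inner binding (cf. shape -> embShape in the diff below)
-- avoids "This binding for 'shape' shadows the existing binding".
describeShape :: [Int64] -> String
describeShape shape = show embShape
  where embShape = map (+ 1) shape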

View File

@@ -22,9 +22,9 @@ import Data.Int (Int32, Int64)
 import Data.List (genericLength)
 import Google.Test (googleTest)
 import TensorFlow.EmbeddingOps (embeddingLookup)
+import Test.Framework (Test)
 import Test.Framework.Providers.QuickCheck2 (testProperty)
-import Test.HUnit.Lang (Assertion(..))
-import Test.HUnit ((@=?), (@?))
+import Test.HUnit ((@=?))
 import Test.Framework.Providers.HUnit (testCase)
 import Test.QuickCheck (Arbitrary(..), Property, choose, vectorOf)
 import Test.QuickCheck.Monadic (monadicIO, run)
@@ -46,13 +46,14 @@ buildAndRun = TF.runSession . TF.buildAnd TF.run
 -- | Tries to perform a simple embedding lookup, with two partitions.
-testEmbeddingLookupHasRightShapeWithPartition =
+testEmbeddingLookupHasRightShapeWithPartition :: Test
+testEmbeddingLookupHasRightShapeWithPartition =
     testCase "testEmbeddingLookupHasRightShapeWithPartition" $ do
-    let shape = TF.Shape [1, 3] -- Consider a 3-dim embedding of two items.
+    let embShape = TF.Shape [1, 3] -- Consider a 3-dim embedding of two items.
     let embedding1 = [1, 1, 1 :: Int32]
     let embedding2 = [0, 0, 0 :: Int32]
-    let embedding = [ TF.constant shape embedding1
-                    , TF.constant shape embedding2
+    let embedding = [ TF.constant embShape embedding1
+                    , TF.constant embShape embedding2
                     ]
     let idValues = [0, 1 :: Int32]
@@ -71,15 +72,16 @@ testEmbeddingLookupHasRightShapeWithPartition =
 -- | Tries to perform a simple embedding lookup, with only a single partition.
-testEmbeddingLookupHasRightShape =
+testEmbeddingLookupHasRightShape :: Test
+testEmbeddingLookupHasRightShape =
     testCase "testEmbeddingLookupHasRightShape" $ do
     -- Consider a 3-dim embedding of two items
-    let shape = TF.Shape [2, 3]
+    let embShape = TF.Shape [2, 3]
     let embeddingInit = [ 1, 1, 1
                         , 0, 0, 0 :: Int32
                         ]
-    let embedding = TF.constant shape embeddingInit
+    let embedding = TF.constant embShape embeddingInit
     let idValues = [0, 1 :: Int32]
     let ids = TF.constant (TF.Shape [1, 2]) idValues
     let op = embeddingLookup [embedding] ids
@@ -96,6 +98,7 @@ testEmbeddingLookupHasRightShape =
 -- | Check that we can calculate gradients w.r.t embeddings.
+testEmbeddingLookupGradients :: Test
 testEmbeddingLookupGradients = testCase "testEmbeddingLookupGradients" $ do
     -- Agrees with "embedding", so gradient should be zero.
     let xVals = V.fromList ([20, 20 :: Float])
@@ -103,15 +106,15 @@ testEmbeddingLookupGradients = testCase "testEmbeddingLookupGradients" $ do
     gs <- TF.runSession $ do
         grads <- TF.build $ do
-            let shape = TF.Shape [2, 1]
+            let embShape = TF.Shape [2, 1]
             let embeddingInit = [1, 20 ::Float]
             let idValues = [1, 1 :: Int32]
             let ids = TF.constant (TF.Shape [1, 2]) idValues
-            x <- TF.placeholder (TF.Shape [2])
-            embedding <- TF.initializedVariable
-                         =<< TF.render (TF.constant shape embeddingInit)
+            x <- TF.placeholder (TF.Shape [2])
+            embedding <- TF.initializedVariable
+                         =<< TF.render (TF.constant embShape embeddingInit)
             op <- embeddingLookup [embedding] ids
             let twoNorm = CoreOps.square $ TF.abs (op - x)
                 loss = TF.mean twoNorm (TF.scalar (0 :: Int32))
@@ -163,7 +166,9 @@ instance Arbitrary a => Arbitrary (LookupExample a) where
     arbitrary = do
         rank <- choose (1, 4)
         -- Takes rank-th root of 100 to cap the tensor size.
-        let maxDim = fromIntegral $ ceiling $ 100 ** (1 / fromIntegral rank)
+        let maxDim = fromIntegral (ceiling doubleMaxDim :: Int64)
+            doubleMaxDim :: Double
+            doubleMaxDim = 100 ** (1 / fromIntegral rank)
         shape@(firstDim : _) <- vectorOf rank (choose (1, maxDim))
         values <- vectorOf (fromIntegral $ product shape) arbitrary
         numParts <- choose (2, 15)

View File

@@ -12,9 +12,8 @@
 -- See the License for the specific language governing permissions and
 -- limitations under the License.
-{-# LANGUAGE OverloadedLists #-}
 {-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE NoMonomorphismRestriction #-}
+{-# LANGUAGE RankNTypes #-}
 module Main where
@@ -22,6 +21,7 @@ import Control.Monad.IO.Class (liftIO)
 import Data.Int (Int32, Int64)
 import Google.Test (googleTest)
 import System.IO.Temp (withSystemTempDirectory)
+import Test.Framework (Test)
 import Test.Framework.Providers.HUnit (testCase)
 import Test.HUnit ((@=?))
 import qualified Data.ByteString.Char8 as B8
@@ -33,25 +33,31 @@ import qualified TensorFlow.Nodes as TF
 import qualified TensorFlow.Ops as TF
 import qualified TensorFlow.Session as TF
 import qualified TensorFlow.Tensor as TF
+import qualified TensorFlow.Types as TF

 -- | Test that one can easily determine number of elements in the tensor.
+testSize :: Test
 testSize = testCase "testSize" $ do
-    x <- eval $ TF.size (TF.constant [2, 3] [0..5 :: Float])
+    x <- eval $ TF.size (TF.constant (TF.Shape [2, 3]) [0..5 :: Float])
     TF.Scalar (2 * 3 :: Int32) @=? x

+eval :: forall a t. TF.Fetchable t a => t -> IO a
 eval = TF.runSession . TF.buildAnd TF.run . return

 -- | Confirms that the original example from Python code works.
+testReducedShape :: Test
 testReducedShape = testCase "testReducedShape" $ do
     x <- eval $ TF.reducedShape (TF.vector [2, 3, 5, 7 :: Int64])
                                 (TF.vector [1, 2 :: Int32])
     V.fromList [2, 1, 1, 7 :: Int32] @=? x

+testSaveRestore :: Test
 testSaveRestore = testCase "testSaveRestore" $
     withSystemTempDirectory "" $ \dirPath -> do
         let path = B8.pack $ dirPath ++ "/checkpoint"
             var :: TF.Build (TF.Tensor TF.Ref Float)
-            var = TF.render =<< TF.named "a" <$> TF.zeroInitializedVariable []
+            var = TF.render =<<
+                  TF.named "a" <$> TF.zeroInitializedVariable (TF.Shape [])
         TF.runSession $ do
             v <- TF.build var
             TF.buildAnd TF.run_ $ TF.assign v 134
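
A side note on the pragma swap in this file: the point-free eval binding previously relied on NoMonomorphismRestriction to stay polymorphic; once it gets an explicit signature (whose written forall needs an extension such as RankNTypes, presumably why that pragma was added), the restriction no longer bites. A self-contained illustration with made-up names, not TensorFlow code:

{-# OPTIONS_GHC -Wall #-}
module MonoSketch where

-- Without a signature, the monomorphism restriction would pin this
-- zero-argument, point-free binding to one concrete type, and -Wall
-- would also flag the missing signature. The explicit signature keeps
-- it usable at both call sites in main.
describe :: Show a => a -> String
describe = ("value: " ++) . show

main :: IO ()
main = do
    putStrLn (describe (3 :: Int))
    putStrLn (describe False)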