tensorflow-haskell/tensorflow-nn/tests/NNTest.hs
Commit d62c614695 by Judah Jacobson (2017-04-06 15:10:33 -07:00): Distinguish between "rendered" and "unrendered" Tensors. (#88)

There are now three types of `Tensor`:

- `Tensor Value a`: rendered value
- `Tensor Ref a`: rendered reference
- `Tensor Build a`: unrendered value
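
For concreteness, here is a minimal sketch of where each kind shows up (an illustration only; it assumes `vector`, `initializedVariable`, and `assign` as exported by `TensorFlow.Ops`):

    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    example :: TF.Session ()
    example = do
        let built = TF.vector [1, 2, 3 :: Float]  -- Tensor Build Float (unrendered)
        val <- TF.render built                    -- Tensor Value Float (rendered)
        ref <- TF.initializedVariable val         -- Tensor Ref Float (rendered reference)
        _   <- TF.assign ref built                -- stateful ops stay monadic
        return ()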

The extra bookkeeping makes it easier to track (and enforce) which tensors are
rendered or not.  For examples where this has been confusing in the past, see

With this change, pure ops look similar to before, returning `Tensor Build`
instead of `Tensor Value`.  "Stateful" (monadic) ops are unchanged.  For
example:

    add :: OneOf [..] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
    assign :: (MonadBuild m, TensorType t)
           => Tensor Ref t -> Tensor v'2 t -> m (Tensor Ref t)

The `gradients` function now requires that the variables over which it's
differentiating are pre-rendered:

    gradients :: (..., Rendered v2) => Tensor v1 a -> [Tensor v2 a]
              -> m [Tensor Value a]

(`Rendered v2` means that `v2` is either a `Ref` or a `Value`.)
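
For example, a minimal sketch of differentiating through a pre-rendered tensor (assuming the qualified imports used in the test file below, plus `mul` from `TensorFlow.Ops`):

    gradOfSquare :: TF.Session (V.Vector Float)
    gradOfSquare = do
        x <- TF.render $ TF.vector [1, 2, 3 :: Float]  -- rendered: Tensor Value Float
        let y = x `TF.mul` x                           -- unrendered: Tensor Build Float
        grads <- TF.gradients y [x]                    -- requires x to be Rendered
        TF.run (head grads)                            -- d(x*x)/dx = 2x: [2.0, 4.0, 6.0]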

Additionally, the implementation of `gradients` now takes care to render every
intermediate value when performing the reverse accumulation.  I suspect this
fixes an exponential blowup for complicated expressions.
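
To see how such a blowup can arise, note that every use of an unrendered `Tensor Build` re-runs its build action. A hedged sketch (assuming `add` from `TensorFlow.Ops`) of an expression whose construction revisits the whole subtree at every level unless the intermediates are rendered:

    -- Each level uses its input twice, so building the result of
    -- `doubler n x` re-traverses the unrendered subexpression at every
    -- level; rendering each intermediate keeps the work linear in n.
    doubler :: Int -> TF.Tensor TF.Build Float -> TF.Tensor TF.Build Float
    doubler 0 x = x
    doubler n x = let y = doubler (n - 1) x in y `TF.add` y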

-- Copyright 2016 TensorFlow authors.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.

{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE OverloadedLists #-}

module Main where

import Google.Test (googleTest)
import TensorFlow.Test (assertAllClose)
import Test.Framework (Test)
import Test.Framework.Providers.HUnit (testCase)

import qualified Data.Vector as V
import qualified TensorFlow.Gradient as TF
import qualified TensorFlow.NN as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.Core as TF

-- | These tests are ported from:
--
--     <tensorflow>/tensorflow/python/ops/nn_xent_tests.py
--
-- `sigmoidXentWithLogits` below is the reference implementation used to
-- check the one we wrote in `TensorFlow.NN.sigmoidCrossEntropyWithLogits`.
--
sigmoidXentWithLogits :: Floating a => Ord a => [a] -> [a] -> [a]
sigmoidXentWithLogits logits' targets' =
    let sig = map (\x -> 1 / (1 + exp (-x))) logits'
        eps = 0.0001
        predictions = map (\p -> min (max p eps) (1 - eps)) sig
        xent y z = (-z) * (log y) - (1 - z) * log (1 - y)
    in zipWith xent predictions targets'
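
-- Aside: this reference clamps probabilities to [eps, 1 - eps] before
-- taking logs, matching the Python test it was ported from. TensorFlow's
-- fused op instead uses the numerically stable form
-- max(x, 0) - x * z + log (1 + exp (negate (abs x))).
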
data Inputs = Inputs {
      logits  :: [Float]
    , targets :: [Float]
    }

defInputs :: Inputs
defInputs = Inputs {
      logits  = [-100, -2, -2, 0, 2, 2,   2, 100]
    , targets = [   0,  0,  1, 0, 0, 1, 0.5,   1]
    }

testLogisticOutput :: Test
testLogisticOutput = testCase "testLogisticOutput" $ do
    let inputs = defInputs
    r <- run $ do
        vLogits  <- TF.render $ TF.vector $ logits  inputs
        vTargets <- TF.render $ TF.vector $ targets inputs
        TF.sigmoidCrossEntropyWithLogits vLogits vTargets
    let ourLoss = V.fromList $ sigmoidXentWithLogits (logits inputs) (targets inputs)
    assertAllClose r ourLoss

testLogisticOutputMultipleDim :: Test
testLogisticOutputMultipleDim =
    testCase "testLogisticOutputMultipleDim" $ do
        let inputs = defInputs
            shape  = [2, 2, 2]
        r <- run $ do
            vLogits  <- TF.render $ TF.constant shape (logits inputs)
            vTargets <- TF.render $ TF.constant shape (targets inputs)
            TF.sigmoidCrossEntropyWithLogits vLogits vTargets
        let ourLoss = V.fromList $ sigmoidXentWithLogits (logits inputs) (targets inputs)
        assertAllClose r ourLoss

testGradientAtZero :: Test
testGradientAtZero = testCase "testGradientAtZero" $ do
    r <- run $ do
        let inputs = defInputs { logits = [0, 0], targets = [0, 1] }
        vTargets <- TF.render $ TF.vector $ targets inputs
        vLogits  <- TF.render $ TF.vector $ logits  inputs
        let tfLoss = TF.sigmoidCrossEntropyWithLogits vLogits vTargets
        l <- tfLoss
        TF.gradients l [vLogits]
    -- The derivative of sigmoid cross-entropy is sigmoid(x) - z, so at
    -- x = 0 it is 0.5 - z: [0.5, -0.5] for targets [0, 1].
    assertAllClose (head r) (V.fromList [0.5, -0.5])

-- | Build a graph in the 'TF.Session' monad and fetch its result.
run :: TF.Fetchable t a => TF.Session t -> IO a
run = TF.runSession . (>>= TF.run)

main :: IO ()
main = googleTest [ testGradientAtZero
                  , testLogisticOutput
                  , testLogisticOutputMultipleDim
                  ]