
Moved reduceMean to Ops (#136)

Christian Berentsen 2017-06-21 05:50:46 +02:00 committed by Judah Jacobson
parent 042910b000
commit 4ab9cb9cf2
3 changed files with 23 additions and 10 deletions


@@ -41,9 +41,6 @@ randomParam width (TF.Shape shape) =
   where
     stddev = TF.scalar (1 / sqrt (fromIntegral width))

-reduceMean :: TF.Tensor TF.Build Float -> TF.Tensor TF.Build Float
-reduceMean xs = TF.mean xs (TF.scalar (0 :: Int32))
-
 -- Types must match due to model structure.
 type LabelType = Int32
@@ -85,12 +82,12 @@ createModel = do
     labels <- TF.placeholder [batchSize]
     let labelVecs = TF.oneHot labels (fromIntegral numLabels) 1 0
         loss =
-            reduceMean $ fst $ TF.softmaxCrossEntropyWithLogits logits labelVecs
+            TF.reduceMean $ fst $ TF.softmaxCrossEntropyWithLogits logits labelVecs
         params = [hiddenWeights, hiddenBiases, logitWeights, logitBiases]
     trainStep <- TF.minimizeWith TF.adam loss params

     let correctPredictions = TF.equal predict labels
-    errorRateTensor <- TF.render $ 1 - reduceMean (TF.cast correctPredictions)
+    errorRateTensor <- TF.render $ 1 - TF.reduceMean (TF.cast correctPredictions)

     return Model {
           train = \imFeed lFeed -> TF.runWithFeeds_ [
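
Note that the deleted local helper averaged only along axis 0 (TF.mean xs (TF.scalar 0)), while the exported TF.reduceMean added to TensorFlow.Ops below averages over every axis. The loss and error-rate tensors here are rank 1, so the two forms agree; the sketch below is illustrative only (not part of this commit) and shows where they would differ on a rank-2 input.

-- Illustrative comparison; assumes the tensorflow packages are built against
-- this commit. The constant values are made up for the demonstration.
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps (mean)
import qualified TensorFlow.Ops as TF

main :: IO ()
main = TF.runSession $ do
    let xs = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
    -- Old helper style: mean along axis 0 only.
    colMeans <- TF.run =<< TF.render (CoreOps.mean xs (TF.scalar (0 :: Int32)))
    -- New exported op: mean over every axis.
    overall <- TF.run =<< TF.render (TF.reduceMean xs)
    liftIO $ print (colMeans :: V.Vector Float)               -- expected: [2.0,3.0]
    liftIO $ print (TF.unScalar (overall :: TF.Scalar Float)) -- expected: 2.5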


@@ -106,6 +106,8 @@ module TensorFlow.Ops
     , CoreOps.range
     , CoreOps.range'
     , reducedShape
+    , reduceMean
+    , reduceMean'
     , CoreOps.relu
     , CoreOps.relu'
     , CoreOps.reluGrad
@@ -330,6 +332,23 @@ reduceSum' :: (OneOf '[ Double, Float, Int32, Int64
 reduceSum' params x = CoreOps.sum' params x allAxes
   where allAxes = CoreOps.range 0 (CoreOps.rank x :: Tensor Build Int32) 1

+-- | Computes the mean of elements across dimensions of a tensor.
+-- See `TensorFlow.GenOps.Core.mean`
+reduceMean
+  :: ( TensorType a
+     , OneOf '[ Double, Float, Complex Float, Complex Double] a
+     )
+  => Tensor v a -> Tensor Build a
+reduceMean = reduceMean' id
+
+reduceMean'
+  :: ( TensorType a
+     , OneOf '[ Double, Float, Complex Float, Complex Double] a
+     )
+  => OpParams -> Tensor v a -> Tensor Build a
+reduceMean' params x = CoreOps.mean' params x allAxes
+  where allAxes = CoreOps.range 0 (CoreOps.rank x :: Tensor Build Int32) 1
+
 -- | Create a constant vector.
 vector :: TensorType a => [a] -> Tensor Build a
 vector = vector' id
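
Since allAxes covers range 0 (rank x) 1, the new op always reduces its argument to a scalar, mirroring reduceSum/reduceSum' directly above. A minimal sketch of calling the two new exports from client code (the session scaffolding is illustrative, not part of this commit):

-- Minimal sketch; assumes the tensorflow and tensorflow-ops packages are
-- built against this commit.
import Control.Monad.IO.Class (liftIO)
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

main :: IO ()
main = TF.runSession $ do
    let xs = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
    -- Mean of all four elements: (1 + 2 + 3 + 4) / 4 == 2.5
    avg <- TF.run =<< TF.render (TF.reduceMean xs)
    -- reduceMean' also accepts OpParams; `id` applies no attribute overrides,
    -- so this call is equivalent to the one above.
    avg' <- TF.run =<< TF.render (TF.reduceMean' id xs)
    liftIO $ print (TF.unScalar (avg  :: TF.Scalar Float))
    liftIO $ print (TF.unScalar (avg' :: TF.Scalar Float))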


@@ -6,7 +6,7 @@ import Control.Monad (replicateM_)
 import qualified Data.Vector as V
 import qualified TensorFlow.Core as TF
-import qualified TensorFlow.GenOps.Core as TF (square, rank)
+import qualified TensorFlow.GenOps.Core as TF (square)
 import qualified TensorFlow.Minimize as TF
 import qualified TensorFlow.Ops as TF hiding (initializedVariable)
 import qualified TensorFlow.Variable as TF
@@ -18,9 +18,6 @@ import TensorFlow.Test (assertAllClose)
 randomParam :: TF.Shape -> TF.Session (TF.Tensor TF.Value Float)
 randomParam (TF.Shape shape) = TF.truncatedNormal (TF.vector shape)

-reduceMean :: TF.Tensor v Float -> TF.Tensor TF.Build Float
-reduceMean xs = TF.mean xs (TF.range 0 (TF.rank xs) 1)
-
 fitMatrix :: Test
 fitMatrix = testCase "fitMatrix" $ TF.runSession $ do
     u <- TF.initializedVariable =<< randomParam [2, 1]
@@ -28,7 +25,7 @@ fitMatrix = testCase "fitMatrix" $ TF.runSession $ do
     let ones = [1, 1, 1, 1] :: [Float]
         matx = TF.constant [2, 2] ones
         diff = matx `TF.sub` (TF.readValue u `TF.matMul` TF.readValue v)
-        loss = reduceMean $ TF.square diff
+        loss = TF.reduceMean $ TF.square diff
     trainStep <- TF.minimizeWith (TF.gradientDescent 0.01) loss [u, v]
     replicateM_ 1000 (TF.run trainStep)
     (u',v') <- TF.run (TF.readValue u, TF.readValue v)