-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/

-- | Friendly layer around TensorFlow bindings.
--
-- Please see README.md
@package tensorflow-ops
@version 0.3.0.0


module TensorFlow.Convolution

-- | Convolution padding.
data Padding

-- | output_spatial_shape[i] = ceil( (input_spatial_shape[i] -
--   (spatial_filter_shape[i]-1) * dilation_rate[i]) / strides[i])
PaddingValid :: Padding

-- | output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
PaddingSame :: Padding

-- | Tensor data format (position of the channel dimension).
data DataFormat

-- | Channel is the last dimension (e.g. NWC, NHWC, NDHWC)
ChannelLast :: DataFormat

-- | Channel is the first dimension after N (e.g. NCW, NCHW, NCDHW)
ChannelFirst :: DataFormat

-- | 2D Convolution with default parameters.
conv2D :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 t -> Tensor Build t
conv2D' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 t -> Tensor Build t

-- | 2D convolution backpropagation filter with default parameters.
conv2DBackpropFilter :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Build t
conv2DBackpropFilter' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Build t

-- | 2D convolution backpropagation input with default parameters.
conv2DBackpropInput :: OneOf '[Word16, Double, Float] t => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Build t
conv2DBackpropInput' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Build t

-- | 3D Convolution with default parameters.
conv3D :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 t -> Tensor Build t
conv3D' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 t -> Tensor Build t

-- | 3D convolution backpropagation filter with default parameters.
conv3DBackpropFilter :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Build t
conv3DBackpropFilter' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Build t

-- | 3D convolution backpropagation filter with default parameters.
conv3DBackpropFilterV2 :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Build t
conv3DBackpropFilterV2' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Build t

-- | 3D convolution backpropagation input with default parameters.
conv3DBackpropInput :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Build t
conv3DBackpropInput' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Build t

-- | 3D convolution backpropagation input with default parameters.
conv3DBackpropInputV2 :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tshape) => Tensor v1 tshape -> Tensor v2 t -> Tensor v3 t -> Tensor Build t
conv3DBackpropInputV2' :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tshape) => OpParams -> Padding -> DataFormat -> Tensor v1 tshape -> Tensor v2 t -> Tensor v3 t -> Tensor Build t

-- | Depth-wise 2D convolution native with default parameters.
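--
-- For example, the primed variants thread OpParams, Padding, and
-- DataFormat explicitly. A minimal sketch (input and filt are
-- hypothetical 4-D tensors; the required strides attribute is assumed
-- to be supplied through OpParams, using opAttr from TensorFlow.Build,
-- (.~) from Control.Lens, and OverloadedStrings for the attribute name):
--
--   depthwiseConv2dNative' (opAttr "strides" .~ ([1, 1, 1, 1] :: [Int64]))
--                          PaddingSame ChannelLast input filt
--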
depthwiseConv2dNative :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 t -> Tensor Build t depthwiseConv2dNative' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 t -> Tensor Build t -- | Depth-wise 2D convolution native backpropagation filter with default -- parameters. depthwiseConv2dNativeBackpropFilter :: OneOf '[Word16, Double, Float] t => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Build t depthwiseConv2dNativeBackpropFilter' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Build t -- | Depth-wise 2D convolution native backpropagation input with default -- parameters. depthwiseConv2dNativeBackpropInput :: OneOf '[Word16, Double, Float] t => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Build t depthwiseConv2dNativeBackpropInput' :: OneOf '[Word16, Double, Float] t => OpParams -> Padding -> DataFormat -> Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Build t -- | This module contains definitions for some built-in TensorFlow -- operations. -- -- Note that certain, "stateful" ops like variable and -- assign return a Build action (e.g., Build (Tensor -- Ref a) instead of a pure value; the returned Tensors are -- always rendered in the current Build context. This approach -- helps us avoid problems with inlining or common subexpression -- elimination, by writing -- --
--   do
--       v <- variable []
--       w <- assign v 3
--       render $ w * w
--   
-- -- instead of -- --
--   let
--      v = variable []
--      w = assign v 3
--   in w * w
--   
-- -- since the latter could be reasonably transformed by the compiler into -- (or vice versa) -- --
--   let
--      v = variable []
--      w = assign v 3
--      w' = assign v 3
--   in w * w'
--   
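--
-- Putting the first pattern into a complete session, a minimal sketch
-- (assumes runSession, run, liftIO, and unScalar from the base
-- tensorflow package, and expr from TensorFlow.Tensor to turn the Ref
-- tensor into a Build expression so the Num instance applies):
--
--   runSession $ do
--       v <- variable []
--       w <- assign v (scalar (3 :: Float))
--       r <- render $ expr w * expr w
--       result <- run r
--       liftIO $ print (unScalar result :: Float)
--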
-- -- Ops should return a Build action if their original -- OpDef marks them as stateful, or if they take any Refs as -- input. (This mirrors the rules that TensorFlow uses to avoid common -- subexpression elimination.) module TensorFlow.Ops add :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t add' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t abs :: forall (v'1 :: Type -> Type) t. OneOf '[Int16, Int32, Int64, Int8, Word16, Double, Float] t => Tensor v'1 t -> Tensor Build t abs' :: forall (v'1 :: Type -> Type) t. OneOf '[Int16, Int32, Int64, Int8, Word16, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor Build t addN :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float, Variant] t => [Tensor v'1 t] -> Tensor Build t addN' :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float, Variant] t => OpParams -> [Tensor v'1 t] -> Tensor Build t argMax :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tidx output_type. (OneOf '[Complex Double, Complex Float, Bool, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx, OneOf '[Int32, Int64] output_type) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build output_type argMax' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tidx output_type. (OneOf '[Complex Double, Complex Float, Bool, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx, OneOf '[Int32, Int64] output_type) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build output_type assign :: forall (v'2 :: Type -> Type) t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) assign' :: forall (v'2 :: Type -> Type) t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) broadcastGradientArgs :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Int32, Int64] t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) broadcastGradientArgs' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Int32, Int64] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) cast :: forall (v'1 :: Type -> Type) srcT dstT. (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT cast' :: forall (v'1 :: Type -> Type) srcT dstT. (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT concat :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. TensorType t => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t concat' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. TensorType t => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t -- | Create a constant tensor. -- -- The values should be in row major order, e.g., -- -- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ... constant :: TensorType a => Shape -> [a] -> Tensor Build a constant' :: forall a. 
TensorType a => OpParams -> Shape -> [a] -> Tensor Build a equal :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool equal' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool expandDims :: TensorType t => Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t expandDims' :: TensorType t => OpParams -> Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t -- | Creates a variable initialized to the given value. Initialization -- happens next time session runs. initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Tensor Ref a) initializedVariable' :: (MonadBuild m, TensorType a) => OpParams -> Tensor v a -> m (Tensor Ref a) -- | Creates a zero-initialized variable with the given shape. zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Tensor Ref a) zeroInitializedVariable' :: (MonadBuild m, TensorType a, Num a) => OpParams -> Shape -> m (Tensor Ref a) fill :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t index_type. (TensorType t, OneOf '[Int32, Int64] index_type) => Tensor v'1 index_type -> Tensor v'2 t -> Tensor Build t fill' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t index_type. (TensorType t, OneOf '[Int32, Int64] index_type) => OpParams -> Tensor v'1 index_type -> Tensor v'2 t -> Tensor Build t identity :: forall (v'1 :: Type -> Type) t. TensorType t => Tensor v'1 t -> Tensor Build t identity' :: forall (v'1 :: Type -> Type) t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t matMul :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t matMul' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t einsum :: forall (v'1 :: Type -> Type) t. TensorType t => ByteString -> [Tensor v'1 t] -> Tensor Build t einsum' :: forall (v'1 :: Type -> Type) t. TensorType t => OpParams -> ByteString -> [Tensor v'1 t] -> Tensor Build t matTranspose :: TensorType a => Tensor e a -> Tensor Build a matTranspose' :: TensorType a => OpParams -> Tensor v a -> Tensor Build a mean :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tidx. (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t mean' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tidx. (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t mul :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t mul' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. 
OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t neg :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Double, Float] t => Tensor v'1 t -> Tensor Build t neg' :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor Build t oneHot :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) (v'3 :: Type -> Type) (v'4 :: Type -> Type) t tI. (TensorType t, OneOf '[Int32, Int64, Word8] tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t oneHot' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) (v'3 :: Type -> Type) (v'4 :: Type -> Type) t tI. (TensorType t, OneOf '[Int32, Int64, Word8] tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t pack :: forall (v'1 :: Type -> Type) t. TensorType t => [Tensor v'1 t] -> Tensor Build t pack' :: forall (v'1 :: Type -> Type) t. TensorType t => OpParams -> [Tensor v'1 t] -> Tensor Build t placeholder :: (MonadBuild m, TensorType a) => Shape -> m (Tensor Value a) placeholder' :: forall m a. (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Tensor Value a) range :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) (v'3 :: Type -> Type) tidx. OneOf '[Int32, Int64, Word16, Double, Float] tidx => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx range' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) (v'3 :: Type -> Type) tidx. OneOf '[Int32, Int64, Word16, Double, Float] tidx => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx -- | Helper function for reduction ops (translation of -- math_ops.reduced_shape). reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32 -- | Computes the mean of elements across dimensions of a tensor. See -- mean reduceMean :: (TensorType a, OneOf '[Double, Float, Complex Float, Complex Double] a) => Tensor v a -> Tensor Build a reduceMean' :: (TensorType a, OneOf '[Double, Float, Complex Float, Complex Double] a) => OpParams -> Tensor v a -> Tensor Build a relu :: forall (v'1 :: Type -> Type) t. OneOf '[Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t => Tensor v'1 t -> Tensor Build t relu' :: forall (v'1 :: Type -> Type) t. OneOf '[Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor Build t reluGrad :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t reluGrad' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t tanh :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Word16, Double, Float] t => Tensor v'1 t -> Tensor Build t tanhGrad :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Word16, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t reshape :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tshape. 
(TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t reshape' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tshape. (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t -- | Restore a tensor's value from a checkpoint file. restore :: forall a m. (MonadBuild m, TensorType a) => ByteString -> Tensor Ref a -> m ControlNode -- | Restore a tensor's value from a checkpoint file. -- -- This version allows restoring from a checkpoint file that uses a -- different tensor name than the variable. restoreFromName :: forall a m. (MonadBuild m, TensorType a) => ByteString -> ByteString -> Tensor Ref a -> m ControlNode save :: forall a m v. (Rendered (Tensor v), MonadBuild m, TensorType a) => ByteString -> [Tensor v a] -> m ControlNode -- | Create a constant scalar. scalar :: TensorType a => a -> Tensor Build a scalar' :: TensorType a => OpParams -> a -> Tensor Build a shape :: TensorType t => Tensor v t -> Tensor Build Int32 shape' :: TensorType t => OpParams -> Tensor v t -> Tensor Build Int32 sigmoid :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Word16, Double, Float] t => Tensor v'1 t -> Tensor Build t sigmoidGrad :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Word16, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t sign :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t => Tensor v'1 t -> Tensor Build t sign' :: forall (v'1 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor Build t size :: forall (v'1 :: Type -> Type) t out_type. (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type size' :: forall (v'1 :: Type -> Type) t out_type. (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type softmax :: forall (v'1 :: Type -> Type) t. OneOf '[Word16, Double, Float] t => Tensor v'1 t -> Tensor Build t softmax' :: forall (v'1 :: Type -> Type) t. OneOf '[Word16, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor Build t softmaxCrossEntropyWithLogits :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Word16, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) softmaxCrossEntropyWithLogits' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Word16, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) sparseToDense :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) (v'3 :: Type -> Type) (v'4 :: Type -> Type) t tindices. (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t sparseToDense' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) (v'3 :: Type -> Type) (v'4 :: Type -> Type) t tindices. (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t sub :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word8, Double, Float] t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t sub' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t. 
OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word8, Double, Float] t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
sum :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tidx. (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
sum' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tidx. (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t

-- | Sum a tensor down to a scalar. See sum.
reduceSum :: OneOf '[Double, Float, Int32, Int64, Complex Float, Complex Double] a => Tensor v a -> Tensor Build a
reduceSum' :: OneOf '[Double, Float, Int32, Int64, Complex Float, Complex Double] a => OpParams -> Tensor v a -> Tensor Build a

transpose :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tperm. (TensorType t, OneOf '[Int32, Int64] tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
transpose' :: forall (v'1 :: Type -> Type) (v'2 :: Type -> Type) t tperm. (TensorType t, OneOf '[Int32, Int64] tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t

-- | Random tensor from the unit normal distribution with bounded values.
--
-- This is a type-restricted version of truncatedNormal.
truncatedNormal :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => Tensor v Int64 -> m (Tensor Value a)
truncatedNormal' :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => OpParams -> Tensor v Int64 -> m (Tensor Value a)

variable :: forall dtype m'. (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
variable' :: forall dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)

-- | Create a constant vector.
vector :: TensorType a => [a] -> Tensor Build a
vector' :: TensorType a => OpParams -> [a] -> Tensor Build a

zeros :: forall a. (Num a, TensorType a) => Shape -> Tensor Build a
zerosLike :: forall (v'1 :: Type -> Type) t. TensorType t => Tensor v'1 t -> Tensor Build t
zerosLike' :: forall (v'1 :: Type -> Type) t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t

-- | Reshape an N-D tensor down to a scalar.
--
-- See reshape.
scalarize :: TensorType a => Tensor v a -> Tensor Build a

instance (TensorFlow.Types.TensorType a, GHC.Num.Num a, v GHC.Types.~ TensorFlow.Build.Build, TensorFlow.Types.OneOf '[GHC.Types.Double, GHC.Types.Float, GHC.Int.Int32, GHC.Int.Int64, Data.Complex.Complex GHC.Types.Float, Data.Complex.Complex GHC.Types.Double] a) => GHC.Num.Num (TensorFlow.Tensor.Tensor v a)


module TensorFlow.NN

-- | Computes sigmoid cross entropy given logits.
--
-- Measures the probability error in discrete classification tasks in
-- which each class is independent and not mutually exclusive. For
-- instance, one could perform multilabel classification where a picture
-- can contain both an elephant and a dog at the same time.
--
-- For brevity, let `x = logits`, `z = targets`.
-- The logistic loss is
--
--       z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
--     = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
--     = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
--     = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
--     = (1 - z) * x + log(1 + exp(-x))
--     = x - x * z + log(1 + exp(-x))
--
-- For x < 0, to avoid overflow in exp(-x), we reformulate the above as
--
--       x - x * z + log(1 + exp(-x))
--     = log(exp(x)) - x * z + log(1 + exp(-x))
--     = - x * z + log(1 + exp(x))
--
-- Hence, to ensure stability and avoid overflow, the implementation uses
-- this equivalent formulation
--
--     max(x, 0) - x * z + log(1 + exp(-abs(x)))
--
-- logits and targets must have the same type and shape.
sigmoidCrossEntropyWithLogits :: (MonadBuild m, OneOf '[Float, Double] a, TensorType a, Num a) => Tensor Value a -> Tensor Value a -> m (Tensor Value a)


module TensorFlow.Gradient

type GradientCompatible a = (Num a, OneOf '[Float, Complex Float, Complex Double] a)

-- | Gradient of y w.r.t. each element of xs.
gradients :: forall a v1 t m. (MonadBuild m, Rendered t, ToTensor t, GradientCompatible a) => Tensor v1 a -> [t a] -> m [Tensor Value a]


-- | Parallel lookups on the list of tensors.
module TensorFlow.EmbeddingOps

-- | Looks up ids in a list of embedding tensors.
--
-- This function is used to perform parallel lookups on the list of
-- tensors in params. It is a generalization of gather, where params is
-- interpreted as a partition of a larger embedding tensor.
--
-- The partition_strategy is "mod": each id is assigned to partition
-- `p = id % len(params)`. For instance, 13 ids are split across 5
-- partitions as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
--
-- The results of the lookup are concatenated into a dense tensor. The
-- returned tensor has shape `shape(ids) + shape(params)[1:]`.
embeddingLookup :: forall a b v1 v2 m. (MonadBuild m, Rendered (Tensor v1), TensorType a, OneOf '[Int64, Int32] b, Num b) => [Tensor v1 a] -> Tensor v2 b -> m (Tensor Value a)


-- | Queues in TensorFlow graph. Very limited support for now.
module TensorFlow.Queue

-- | A queue carrying tuples.
data Queue (as :: [*])

-- | Creates a new queue with the given capacity and shared name.
makeQueue :: forall as m. (MonadBuild m, TensorTypes as) => Int64 -> ByteString -> m (Queue as)

-- | Adds the given values to the queue.
enqueue :: forall as v m. (MonadBuild m, TensorTypes as) => Queue as -> TensorList v as -> m ControlNode

-- | Retrieves the values from the queue.
dequeue :: forall as m. (MonadBuild m, TensorTypes as) => Queue as -> m (TensorList Value as)


-- | An implementation of ResourceHandle-based variables.
--
-- The main difference between this and Ref-based variables is that
-- reads are explicit, via the readValue op.
--
-- TODO: given that distinction, figure out a good story around gradients
-- and save/restore. Then, merge this module into TensorFlow.Ops.
module TensorFlow.Variable

data Variable a

-- | Creates a new, uninitialized variable.
variable :: (MonadBuild m, TensorType a) => Shape -> m (Variable a)
variable' :: forall m a. (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Variable a)

-- | Gets the value stored in a variable.
--
-- Note that this op is stateful since it depends on the value of the
-- variable; however, it may be CSE'd with other reads in the same
-- context. The context can be fixed by using render along with
-- (for example) withControlDependencies. For example:
--
--   runSession $ do
--     v <- variable []
--     a <- assign v 24
--     r <- withControlDependencies a $ render $ readValue v + 18
--     result <- run r
--     liftIO $ (42 :: Float) @=? unScalar result
--   
readValue :: TensorType a => Variable a -> Tensor Build a -- | The initial value of a Variable created with -- initializedVariable. initializedValue :: Variable a -> Maybe (Tensor Value a) -- | Creates a variable initialized to the given value. Initialization -- happens next time session runs. initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Variable a) initializedVariable' :: forall a m v. (MonadBuild m, TensorType a) => OpParams -> Tensor v a -> m (Variable a) -- | Creates a zero-initialized variable with the given shape. zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Variable a) zeroInitializedVariable' :: (MonadBuild m, TensorType a, Num a) => OpParams -> Shape -> m (Variable a) -- | Sets the value of a variable. assign :: (MonadBuild m, TensorType a) => Variable a -> Tensor v a -> m ControlNode assign' :: (MonadBuild m, TensorType a) => OpParams -> Variable a -> Tensor v a -> m ControlNode -- | Increments the value of a variable. assignAdd :: (MonadBuild m, TensorType a) => Variable a -> Tensor v a -> m ControlNode assignAdd' :: (MonadBuild m, TensorType a) => OpParams -> Variable a -> Tensor v a -> m ControlNode -- | Update '*var' according to the Adam algorithm. -- -- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- -- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - -- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + -- epsilon) resourceApplyAdam :: (MonadBuild m, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Variable t -> Variable t -> Variable t -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> m ControlNode resourceApplyAdam' :: (MonadBuild m, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Variable t -> Variable t -> Variable t -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> m ControlNode instance TensorFlow.Tensor.Rendered TensorFlow.Variable.Variable instance TensorFlow.Tensor.ToTensor TensorFlow.Variable.Variable module TensorFlow.Minimize -- | Functions that minimize a loss w.r.t. a set of Variables. -- -- Generally only performs one step of an iterative algorithm. -- -- Minimizers are defined as a function of the gradients instead -- of the loss so that users can apply transformations to the gradients. type Minimizer a = forall m. MonadBuild m => [Variable a] -> [Tensor Value a] -> m ControlNode -- | Convenience wrapper around gradients and a Minimizer. minimizeWith :: (MonadBuild m, GradientCompatible a) => Minimizer a -> Tensor v a -> [Variable a] -> m ControlNode -- | Perform one step of the gradient descent algorithm. gradientDescent :: GradientCompatible a => a -> Minimizer a type OneOfAdamDataTypes t = OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word32, Word64, Word8, Double, Float] t data AdamConfig t AdamConfig :: t -> t -> t -> t -> AdamConfig t [adamLearningRate] :: AdamConfig t -> t [adamBeta1] :: AdamConfig t -> t [adamBeta2] :: AdamConfig t -> t [adamEpsilon] :: AdamConfig t -> t -- | Perform one step of the adam algorithm. -- -- See https://arxiv.org/abs/1412.6980. -- -- NOTE: Currently requires all Variables to have an -- initializedValue. 
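--
-- A minimal usage sketch, adapted from the package README's linear
-- regression example but driven by adam (the Variables satisfy the note
-- above because they are created with initializedVariable; assumes
-- Session, run, run_, and Scalar from the base tensorflow package,
-- readValue and initializedVariable from TensorFlow.Variable, and
-- replicateM_ from Control.Monad):
--
--   fit :: [Float] -> [Float] -> Session (Float, Float)
--   fit xData yData = do
--       let x = vector xData
--           y = vector yData
--       w <- initializedVariable 0
--       b <- initializedVariable 0
--       let yHat = (x `mul` readValue w) `add` readValue b
--           err = yHat `sub` y
--           loss = reduceSum (err `mul` err)
--       trainStep <- minimizeWith adam loss [w, b]
--       replicateM_ 1000 (run_ trainStep)
--       (Scalar w', Scalar b') <- run (readValue w, readValue b)
--       return (w', b')
--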
adam :: (OneOfAdamDataTypes t, Fractional t) => Minimizer t adam' :: OneOfAdamDataTypes t => AdamConfig t -> Minimizer t instance GHC.Real.Fractional t => Data.Default.Class.Default (TensorFlow.Minimize.AdamConfig t)
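
-- The Default instance above makes it easy to override individual Adam
-- hyperparameters through adam'. A small sketch (loss and params are
-- hypothetical; def comes from the data-default package):
--
--   minimizeWith (adam' def { adamLearningRate = 1e-2 }) loss params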