-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Friendly layer around TensorFlow bindings.
--
-- Please see README.md
@package tensorflow-ops
@version 0.1.0.0
-- | This module contains definitions for some built-in TensorFlow
-- operations.
--
-- Note that certain "stateful" ops like variable and
-- assign return a Build action (e.g., Build (Tensor
-- Ref a)) instead of a pure value; the returned Tensors are
-- always rendered in the current Build context. This approach
-- helps us avoid problems with inlining or common subexpression
-- elimination, by writing
--
--
-- do
-- v <- variable []
-- w <- assign v 3
-- render $ w * w
--
--
-- instead of
--
--
-- let
-- v = variable []
-- w = assign v 3
-- in w * w
--
--
-- since the latter could reasonably be transformed by the compiler into
-- the following (or vice versa)
--
--
-- let
-- v = variable []
-- w = assign v 3
-- w' = assign v 3
-- in w * w'
--
--
-- Ops should return a Build action if their original
-- OpDef marks them as stateful, or if they take any Refs as
-- input. (This mirrors the rules that TensorFlow uses to avoid common
-- subexpression elimination.)
module TensorFlow.Ops
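-- A minimal, hedged sketch of the Build/render workflow described above,
-- assuming the session API exported by TensorFlow.Core (runSession, run)
-- behaves as in the project README; mul is used instead of the Num
-- instance so that the Ref tensor returned by assign is accepted directly:
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $ do
--         v <- TF.variable (TF.Shape [])            -- stateful op, runs in MonadBuild
--         w <- TF.assign v (TF.scalar (3 :: Float)) -- also stateful: returns the Ref
--         TF.run (w `TF.mul` w)                     -- fetch w * w
--     print (result :: V.Vector Float)              -- expected: [9.0]
-- ```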
-- | Returns x + y element-wise.
--
-- NOTE: Add supports broadcasting; AddN does not. More about
-- broadcasting is described in the numpy broadcasting documentation.
add :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
add' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the absolute value of a tensor.
--
-- Given a tensor x, this operation returns a tensor containing
-- the absolute value of each element in x. For example, if x is
-- an input element and y is an output element, this operation computes
-- \(y = |x|\).
abs :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t => Tensor v'1 t -> Tensor Build t
abs' :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Add all input tensors element wise.
addN :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => [Tensor v'1 t] -> Tensor Build t
addN' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => OpParams -> [Tensor v'1 t] -> Tensor Build t
-- | Returns the index with the largest value across dimensions of a
-- tensor.
argMax :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
argMax' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
-- | Update ref by assigning value to it.
--
-- This operation outputs "ref" after the assignment is done. This makes
-- it easier to chain operations that need to use the reset value.
assign :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
assign' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
-- | Return the reduction indices for computing gradients of s0 op s1 with
-- broadcast.
--
-- This is typically used by gradient computations for a broadcasting
-- operation.
broadcastGradientArgs :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
broadcastGradientArgs' :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
-- | Cast x of type SrcT to y of type DstT.
cast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT
cast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT
-- | Concatenates tensors along one dimension.
concat :: TensorType t => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t
concat' :: TensorType t => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t
-- | Create a constant tensor.
--
-- The values should be in row-major order, e.g.,
--
-- element 0:   index (0, ..., 0)
-- element 1:   index (0, ..., 1)
-- ...
constant :: TensorType a => Shape -> [a] -> Tensor Build a
constant' :: TensorType a => OpParams -> Shape -> [a] -> Tensor Build a
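-- For instance (a small sketch, assuming the session API from
-- TensorFlow.Core), a 2x3 constant is specified by listing its six
-- elements in row-major order:
--
-- ```
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
--
-- -- Rows [1,2,3] and [4,5,6], flattened row-major.
-- twoByThree :: TF.Tensor TF.Build Float
-- twoByThree = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6]
-- ```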
-- | Returns the truth value of (x == y) element-wise.
--
-- NOTE: Equal supports broadcasting. More about broadcasting is
-- described in the numpy broadcasting documentation.
equal :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
equal' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
expandDims :: TensorType t => Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t
expandDims' :: TensorType t => OpParams -> Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t
-- | Creates a variable initialized to the given value. Initialization
-- happens the next time the session runs.
initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Tensor Ref a)
initializedVariable' :: (MonadBuild m, TensorType a) => OpParams -> Tensor v a -> m (Tensor Ref a)
-- | Creates a zero-initialized variable with the given shape.
zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Tensor Ref a)
zeroInitializedVariable' :: (MonadBuild m, TensorType a, Num a) => OpParams -> Shape -> m (Tensor Ref a)
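-- A small usage sketch (assuming runSession/run from TensorFlow.Core);
-- the initializer runs as part of the session before the value is read:
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $ do
--         w <- TF.initializedVariable (TF.vector [1, 2, 3 :: Float])
--         TF.run w                      -- read back the variable's value
--     print (result :: V.Vector Float)  -- expected: [1.0,2.0,3.0]
-- ```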
-- | Creates a tensor filled with a scalar value.
--
-- This operation creates a tensor of shape dims and fills it
-- with value.
--
-- For example:
--
-- ```prettyprint
-- # Output tensor has shape [2, 3].
-- fill([2, 3], 9) ==> [[9, 9, 9]
--                      [9, 9, 9]]
-- ```
fill :: TensorType t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
fill' :: TensorType t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
-- | Return a tensor with the same shape and contents as the input tensor
-- or value.
identity :: TensorType t => Tensor v'1 t -> Tensor Build t
identity' :: TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Multiply the matrix "a" by the matrix "b".
--
-- The inputs must be two-dimensional matrices, and the inner dimension of
-- "a" (after being transposed if transpose_a is true) must match the
-- outer dimension of "b" (after being transposed if transpose_b is
-- true).
--
-- NOTE: The default kernel implementation for MatMul on GPUs uses
-- cuBLAS.
matMul :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
matMul' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
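-- A small sketch (assuming runSession/run from TensorFlow.Core) multiplying
-- two 2x2 constants; values are listed in row-major order:
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $ do
--         let a = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--             b = TF.constant (TF.Shape [2, 2]) [5, 6, 7, 8 :: Float]
--         TF.run (a `TF.matMul` b)
--     -- Row-major result [[19, 22], [43, 50]]:
--     print (result :: V.Vector Float)
-- ```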
matTranspose :: TensorType a => Tensor e a -> Tensor Build a
matTranspose' :: TensorType a => OpParams -> Tensor v a -> Tensor Build a
-- | Computes the mean of elements across dimensions of a tensor.
--
-- Reduces input along the dimensions given in
-- reduction_indices. Unless keep_dims is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- reduction_indices. If keep_dims is true, the reduced
-- dimensions are retained with length 1.
mean :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
mean' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | Returns x * y element-wise.
--
-- NOTE: Mul supports broadcasting. More about broadcasting is
-- described in the numpy broadcasting documentation.
mul :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
mul' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes numerical negative value element-wise.
--
-- I.e., \(y = -x\).
neg :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor Build t
neg' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns a one-hot tensor.
--
-- The locations represented by indices in indices take value
-- on_value, while all other locations take value
-- off_value.
--
-- If the input indices is rank N, the output will have
-- rank `N+1`. The new axis is created at dimension axis
-- (default: the new axis is appended at the end).
--
-- If indices is a scalar the output shape will be a vector of
-- length depth.
--
-- If indices is a vector of length features, the
-- output shape will be:
--
-- ```
-- features x depth if axis == -1
-- depth x features if axis == 0
-- ```
--
-- If indices is a matrix (batch) with shape `[batch, features]`, the
-- output shape will be:
--
-- ```
-- batch x features x depth if axis == -1
-- batch x depth x features if axis == 1
-- depth x batch x features if axis == 0
-- ```
--
-- Examples:
--
-- Suppose that
--
-- ```
-- indices = [0, 2, -1, 1]
-- depth = 3
-- on_value = 5.0
-- off_value = 0.0
-- axis = -1
-- ```
--
-- Then output is `[4 x 3]`:
--
-- ```
-- output =
--   [5.0 0.0 0.0]  // one_hot(0)
--   [0.0 0.0 5.0]  // one_hot(2)
--   [0.0 0.0 0.0]  // one_hot(-1)
--   [0.0 5.0 0.0]  // one_hot(1)
-- ```
--
-- Suppose that
--
-- ```
-- indices = [0, 2, -1, 1]
-- depth = 3
-- on_value = 0.0
-- off_value = 3.0
-- axis = 0
-- ```
--
-- Then output is `[3 x 4]`:
--
-- ```
-- output =
--   [0.0 3.0 3.0 3.0]
--   [3.0 3.0 3.0 0.0]
--   [3.0 3.0 3.0 3.0]
--   [3.0 0.0 3.0 3.0]
-- //  ^              one_hot(0)
-- //      ^          one_hot(2)
-- //          ^      one_hot(-1)
-- //              ^  one_hot(1)
-- ```
--
-- Suppose that
--
-- ```
-- indices = [[0, 2], [1, -1]]
-- depth = 3
-- on_value = 1.0
-- off_value = 0.0
-- axis = -1
-- ```
--
-- Then output is `[2 x 2 x 3]`:
--
-- ```
-- output =
--   [
--     [1.0, 0.0, 0.0]  // one_hot(0)
--     [0.0, 0.0, 1.0]  // one_hot(2)
--   ][
--     [0.0, 1.0, 0.0]  // one_hot(1)
--     [0.0, 0.0, 0.0]  // one_hot(-1)
--   ]
-- ```
oneHot :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
oneHot' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
-- | Packs a list of N rank-R tensors into one
-- rank-`(R+1)` tensor.
--
-- Packs the N tensors in values into a tensor with
-- rank one higher than each tensor in values, by packing them
-- along the axis dimension. Given a list of tensors of shape
-- `(A, B, C)`:
--
-- if `axis == 0` then the output tensor will have the shape
-- `(N, A, B, C)`; if `axis == 1` then the output tensor will
-- have the shape `(A, N, B, C)`; etc.
--
-- For example:
--
-- ```prettyprint
-- # x is [1, 4]
-- # y is [2, 5]
-- # z is [3, 6]
-- pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
-- pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
-- ```
--
-- This is the opposite of unpack.
pack :: TensorType t => [Tensor v'1 t] -> Tensor Build t
pack' :: TensorType t => OpParams -> [Tensor v'1 t] -> Tensor Build t
placeholder :: (MonadBuild m, TensorType a) => Shape -> m (Tensor Value a)
placeholder' :: (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Tensor Value a)
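-- placeholder creates a graph input that must be fed at run time. A hedged
-- sketch, assuming feed, runWithFeeds and encodeTensorData exported by
-- TensorFlow.Core work as in the project examples:
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $ do
--         x <- TF.placeholder (TF.Shape [3])
--         let y = x `TF.mul` TF.scalar (2 :: Float)
--             -- Encode the vector [1,2,3] as the value fed for x.
--             feedX = TF.feed x (TF.encodeTensorData (TF.Shape [3])
--                                                    (V.fromList [1, 2, 3]))
--         TF.runWithFeeds [feedX] y
--     print (result :: V.Vector Float)  -- expected: [2.0,4.0,6.0]
-- ```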
-- | Creates a sequence of numbers.
--
-- This operation creates a sequence of numbers that begins at
-- start and extends by increments of delta up to but
-- not including limit.
--
-- For example:
--
-- ```
-- # start is 3
-- # limit is 18
-- # delta is 3
-- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
-- ```
range :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
range' :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
-- | Helper function for reduction ops (translation of
-- math_ops.reduced_shape).
reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32
-- | Computes rectified linear: `max(features, 0)`.
relu :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => Tensor v'1 t -> Tensor Build t
relu' :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes rectified linear gradients for a Relu operation.
reluGrad :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
reluGrad' :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Reshapes a tensor.
--
-- Given tensor, this operation returns a tensor that has the
-- same values as tensor with shape shape.
--
-- If one component of shape is the special value -1, the size of
-- that dimension is computed so that the total size remains constant. In
-- particular, a shape of `[-1]` flattens into 1-D. At most one
-- component of shape can be -1.
--
-- If shape is 1-D or higher, then the operation returns a tensor
-- with shape shape filled with the values of tensor. In
-- this case, the number of elements implied by shape must be the
-- same as the number of elements in tensor.
--
-- For example:
--
-- ```prettyprint
-- # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9]
-- # tensor t has shape [9]
-- reshape(t, [3, 3]) ==> [[1, 2, 3],
--                         [4, 5, 6],
--                         [7, 8, 9]]
--
-- # tensor t is [[[1, 1], [2, 2]],
-- #              [[3, 3], [4, 4]]]
-- # tensor t has shape [2, 2, 2]
-- reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
--                         [3, 3, 4, 4]]
--
-- # tensor t is [[[1, 1, 1],
-- #               [2, 2, 2]],
-- #              [[3, 3, 3],
-- #               [4, 4, 4]],
-- #              [[5, 5, 5],
-- #               [6, 6, 6]]]
-- # tensor t has shape [3, 2, 3]
-- # pass '[-1]' to flatten t
-- reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
--
-- # -1 can also be used to infer the shape
--
-- # -1 is inferred to be 9:
-- reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
-- # -1 is inferred to be 2:
-- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
--                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
-- # -1 is inferred to be 3:
-- reshape(t, [2, -1, 3]) ==> [[[1, 1, 1],
--                              [2, 2, 2],
--                              [3, 3, 3]],
--                             [[4, 4, 4],
--                              [5, 5, 5],
--                              [6, 6, 6]]]
--
-- # tensor t is [7]
-- # shape `[]` reshapes to a scalar
-- reshape(t, []) ==> 7
-- ```
reshape :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t
reshape' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t
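-- The same op from Haskell (a small sketch, assuming runSession/run from
-- TensorFlow.Core): reshape a length-6 vector into a 2x3 matrix.
--
-- ```
-- import           Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $
--         TF.run (TF.reshape (TF.vector [1, 2, 3, 4, 5, 6 :: Float])
--                            (TF.vector [2, 3 :: Int32]))
--     print (result :: V.Vector Float)  -- row-major [[1,2,3],[4,5,6]]
-- ```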
-- | Restore a tensor's value from a checkpoint file.
restore :: (MonadBuild m, TensorType a) => ByteString -> Tensor Ref a -> m ControlNode
-- | Restore a tensor's value from a checkpoint file.
--
-- This version allows restoring from a checkpoint file that uses a
-- different tensor name than the variable.
restoreFromName :: (MonadBuild m, TensorType a) => ByteString -> ByteString -> Tensor Ref a -> m ControlNode
save :: (Rendered v, MonadBuild m, TensorType a) => ByteString -> [Tensor v a] -> m ControlNode
-- | Create a constant scalar.
scalar :: TensorType a => a -> Tensor Build a
scalar' :: TensorType a => OpParams -> a -> Tensor Build a
shape :: TensorType t => Tensor v t -> Tensor Build Int32
shape' :: TensorType t => OpParams -> Tensor v t -> Tensor Build Int32
-- | Returns an element-wise indication of the sign of a number.
--
-- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
--
-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y
-- = 0`.
sign :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor Build t
sign' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns the size of a tensor.
--
-- This operation returns an integer representing the number of elements
-- in input.
--
-- For example:
--
-- ```prettyprint
-- # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-- size(t) ==> 12
-- ```
size :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => Tensor v'1 t -> Tensor Build out_type
size' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type
-- | Computes softmax activations.
--
-- For each batch i and class j we have
--
-- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
softmax :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => Tensor v'1 t -> Tensor Build t
softmax' :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes softmax cross entropy cost and gradients to backpropagate.
--
-- Inputs are the logits, not probabilities.
softmaxCrossEntropyWithLogits :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
softmaxCrossEntropyWithLogits' :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
-- | Converts a sparse representation into a dense tensor.
--
-- Builds an array dense with shape output_shape such
-- that
--
-- ```prettyprint
-- # If sparse_indices is scalar
-- dense[i] = (i == sparse_indices ? sparse_values : default_value)
--
-- # If sparse_indices is a vector, then for each i
-- dense[sparse_indices[i]] = sparse_values[i]
--
-- # If sparse_indices is an n by d matrix, then for each i in [0, n)
-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
-- ```
--
-- All other values in dense are set to default_value.
-- If sparse_values is a scalar, all sparse indices are set to
-- this single value.
--
-- Indices should be sorted in lexicographic order, and indices must not
-- contain any repeats. If validate_indices is true, these
-- properties are checked during execution.
sparseToDense :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
sparseToDense' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
-- | Returns x - y element-wise.
--
-- NOTE: Sub supports broadcasting. More about broadcasting is
-- described in the numpy broadcasting documentation.
sub :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
sub' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the sum of elements across dimensions of a tensor.
--
-- Reduces input along the dimensions given in
-- reduction_indices. Unless keep_dims is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- reduction_indices. If keep_dims is true, the reduced
-- dimensions are retained with length 1.
sum :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
sum' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
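-- For example (a small sketch, assuming runSession/run from
-- TensorFlow.Core), summing a 2x3 constant over dimension 1 yields one
-- value per row:
--
-- ```
-- import           Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $ do
--         let x = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
--         TF.run (TF.sum x (TF.vector [1 :: Int32]))  -- reduce over columns
--     print (result :: V.Vector Float)  -- expected: [6.0,15.0]
-- ```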
-- | Shuffle dimensions of x according to a permutation.
--
-- The output y has the same rank as x. The shapes of
-- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for
-- i in [0, 1, ..., rank(x) - 1]`
transpose :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
transpose' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
-- | Random tensor from the unit normal distribution with bounded values.
--
-- This is a type-restricted version of the generated
-- TensorFlow.GenOps.Core.truncatedNormal op.
truncatedNormal :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => Tensor v Int64 -> m (Tensor Value a)
truncatedNormal' :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => OpParams -> Tensor v Int64 -> m (Tensor Value a)
-- | Use VariableV2 instead.
variable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
variable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)
-- | Create a constant vector.
vector :: TensorType a => [a] -> Tensor Build a
vector' :: TensorType a => OpParams -> [a] -> Tensor Build a
zeros :: (Num a, TensorType a) => Shape -> Tensor Build a
-- | Returns a tensor of zeros with the same shape and type as x.
zerosLike :: TensorType t => Tensor v'1 t -> Tensor Build t
zerosLike' :: TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Reshape an N-D tensor down to a scalar.
--
-- See reshape.
scalarize :: TensorType a => Tensor v a -> Tensor Build a
instance (TensorFlow.Types.TensorType a, GHC.Num.Num a, v ~ TensorFlow.Build.Build, TensorFlow.Types.OneOf '[GHC.Types.Double, GHC.Types.Float, GHC.Int.Int32, GHC.Int.Int64, Data.Complex.Complex GHC.Types.Float, Data.Complex.Complex GHC.Types.Double] a) => GHC.Num.Num (TensorFlow.Tensor.Tensor v a)
-- | Parallel lookups on the list of tensors.
module TensorFlow.EmbeddingOps
-- | Looks up ids in a list of embedding tensors.
--
-- This function is used to perform parallel lookups on the list of
-- tensors in params. It is a generalization of gather,
-- where params is interpreted as a partition of a larger
-- embedding tensor.
--
-- The partition_strategy is "mod": each id is assigned to partition
-- `p = id % len(params)`. For instance, 13 ids are split across 5
-- partitions as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
--
-- The results of the lookup are concatenated into a dense tensor. The
-- returned tensor has shape `shape(ids) + shape(params)[1:]`.
embeddingLookup :: (MonadBuild m, Rendered v1, TensorType a, OneOf '[Int64, Int32] b, Num b) => [Tensor v1 a] -> Tensor v2 b -> m (Tensor Value a)
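-- A hedged sketch of the "mod" partitioning (assuming render, runSession
-- and run from TensorFlow.Core): a 4x2 embedding table split into two
-- partitions, looked up with ids 3 and 0.
--
-- ```
-- import           Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
-- import           TensorFlow.EmbeddingOps (embeddingLookup)
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $ do
--         -- Partition 0 holds rows for ids 0 and 2; partition 1 holds ids 1 and 3.
--         p0 <- TF.render $ TF.constant (TF.Shape [2, 2]) [0, 0, 2, 2 :: Float]
--         p1 <- TF.render $ TF.constant (TF.Shape [2, 2]) [1, 1, 3, 3 :: Float]
--         lookups <- embeddingLookup [p0, p1] (TF.vector [3, 0 :: Int32])
--         TF.run lookups
--     print (result :: V.Vector Float)  -- rows for ids 3 and 0: [3.0,3.0,0.0,0.0]
-- ```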
module TensorFlow.Gradient
-- | Gradient of y w.r.t. each element of xs.
gradients :: (MonadBuild m, Rendered v2, GradientCompatible a) => Tensor v1 a -> [Tensor v2 a] -> m [Tensor Value a]
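-- A minimal sketch (assuming runSession/run from TensorFlow.Core): the
-- gradient of y = x*x with respect to x, evaluated at x = 3.
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF
-- import           TensorFlow.Gradient (gradients)
--
-- main :: IO ()
-- main = do
--     result <- TF.runSession $ do
--         x <- TF.initializedVariable (TF.vector [3 :: Float])
--         let y = x `TF.mul` x       -- y = x^2
--         [dydx] <- gradients y [x]  -- dy/dx = 2 * x
--         TF.run dydx
--     print (result :: V.Vector Float)  -- expected: [6.0]
-- ```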