-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Haskell wrappers for Core Tensorflow Ops.
--
-- Code generated signatures for the Ops in libtensorflow.
@package tensorflow-core-ops
@version 0.1.0.0
module TensorFlow.GenOps.Core
-- | Raise an exception to abort the process when called. If
-- exit_without_error is true, the process will exit normally; otherwise
-- it will exit with a SIGABRT signal.
--
-- Returns nothing but an exception.
abort :: (MonadBuild m') => m' (ControlNode)
abort' :: (MonadBuild m') => OpParams -> m' (ControlNode)
-- | Computes the absolute value of a tensor.
--
-- Given a tensor <tt>x</tt>, this operation returns a tensor containing
-- the absolute value of each element in <tt>x</tt>. For example, if x is
-- an input element and y is an output element, this operation computes
-- \(y = |x|\).
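--
-- For example, the following minimal sketch runs <tt>abs</tt> in a
-- session. It assumes <tt>constant</tt>, <tt>Shape</tt>,
-- <tt>runSession</tt>, and <tt>run</tt> from the companion
-- <tt>tensorflow</tt> and <tt>tensorflow-ops</tt> packages; none of
-- these are part of this module.
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   -- y = |x| for each element of x.
--   v <- TF.runSession $ TF.run $
--          Ops.abs (TF.constant (TF.Shape [3]) [1, -2, 3 :: Float])
--   print (v :: V.Vector Float)  -- fromList [1.0,2.0,3.0]
-- ```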
abs :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
abs' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Applies a gradient to a given accumulator.
--
-- Does not add if local_step is less than the accumulator's global_step.
accumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode)
accumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode)
-- | Returns the number of gradients aggregated in the given accumulators.
accumulatorNumAccumulated :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32)
accumulatorNumAccumulated' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
-- | Updates the accumulator with a new value for global_step.
--
-- Logs a warning if the accumulator's global_step is already higher than
-- new_global_step.
accumulatorSetGlobalStep :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode)
accumulatorSetGlobalStep' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode)
-- | Extracts the average gradient in the given ConditionalAccumulator,
-- provided that sufficient (i.e., more than num_required) gradients have
-- been accumulated.
--
-- The op blocks until sufficient gradients have been accumulated. If the
-- accumulator has already aggregated more than num_required gradients,
-- it returns the average of the accumulated gradients. Also
-- automatically increments the recorded global_step in the accumulator
-- by 1, and resets the aggregate to 0.
accumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype)
accumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype)
-- | Computes acos of x element-wise.
acos :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
acos' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns x + y element-wise.
--
-- *NOTE*: <tt>Add</tt> supports broadcasting. <tt>AddN</tt> does not.
-- More about broadcasting <a>here</a>.
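--
-- A minimal broadcasting sketch (same companion-package assumptions as
-- the <tt>abs</tt> example above):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   -- A scalar broadcasts against a 2x2 matrix.
--   v <- TF.runSession $ TF.run $
--          Ops.add (TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float])
--                  (TF.constant (TF.Shape []) [10])
--   print (v :: V.Vector Float)  -- fromList [11.0,12.0,13.0,14.0]
-- ```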
add :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
add' :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Add an <tt>N</tt>-minibatch <tt>SparseTensor</tt> to a
-- <tt>SparseTensorsMap</tt>, return <tt>N</tt> handles.
--
-- A <tt>SparseTensor</tt> of rank <tt>R</tt> is represented by three
-- tensors: <tt>sparse_indices</tt>, <tt>sparse_values</tt>, and
-- <tt>sparse_shape</tt>, where
--
-- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
--
-- An <tt>N</tt>-minibatch of <tt>SparseTensor</tt> objects is
-- represented as a <tt>SparseTensor</tt> having a first
-- <tt>sparse_indices</tt> column taking values between `[0, N)`, where
-- the minibatch size `N == sparse_shape[0]`.
--
-- The input <tt>SparseTensor</tt> must have rank <tt>R</tt> greater than
-- 1, and the first dimension is treated as the minibatch dimension.
-- Elements of the <tt>SparseTensor</tt> must be sorted in increasing
-- order of this first dimension. The stored <tt>SparseTensor</tt>
-- objects pointed to by each row of the output <tt>sparse_handles</tt>
-- will have rank `R-1`.
--
-- The <tt>SparseTensor</tt> values can then be read out as part of a
-- minibatch by passing the given keys as vector elements to
-- <tt>TakeManySparseFromTensorsMap</tt>. To ensure the correct
-- <tt>SparseTensorsMap</tt> is accessed, ensure that the same
-- <tt>container</tt> and <tt>shared_name</tt> are passed to that Op. If
-- no <tt>shared_name</tt> is provided here, instead use the *name* of
-- the Operation created by calling <tt>AddManySparseToTensorsMap</tt> as
-- the <tt>shared_name</tt> passed to
-- <tt>TakeManySparseFromTensorsMap</tt>. Ensure the Operations are
-- colocated.
addManySparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
addManySparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
-- | Add all input tensors element-wise.
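--
-- A minimal sketch (companion-package assumptions as in the <tt>abs</tt>
-- example above); note that all inputs must share one shape, since
-- <tt>AddN</tt> does not broadcast:
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   let xs = [TF.constant (TF.Shape [2]) [x, x] | x <- [1, 2, 3 :: Float]]
--   v <- TF.runSession $ TF.run (Ops.addN xs)
--   print (v :: V.Vector Float)  -- fromList [6.0,6.0]
-- ```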
addN :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => [Tensor v'1 t] -> Tensor Build t
addN' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> [Tensor v'1 t] -> Tensor Build t
-- | Add a <tt>SparseTensor</tt> to a <tt>SparseTensorsMap</tt> return its
-- handle.
--
-- A <tt>SparseTensor</tt> is represented by three tensors:
-- <tt>sparse_indices</tt>, <tt>sparse_values</tt>, and
-- <tt>sparse_shape</tt>.
--
-- This operator takes the given <tt>SparseTensor</tt> and adds it to a
-- container object (a <tt>SparseTensorsMap</tt>). A unique key within
-- this container is generated in the form of an <tt>int64</tt>, and this
-- is the value that is returned.
--
-- The <tt>SparseTensor</tt> can then be read out as part of a minibatch
-- by passing the key as a vector element to
-- <tt>TakeManySparseFromTensorsMap</tt>. To ensure the correct
-- <tt>SparseTensorsMap</tt> is accessed, ensure that the same
-- <tt>container</tt> and <tt>shared_name</tt> are passed to that Op. If
-- no <tt>shared_name</tt> is provided here, instead use the *name* of
-- the Operation created by calling <tt>AddSparseToTensorsMap</tt> as the
-- <tt>shared_name</tt> passed to <tt>TakeManySparseFromTensorsMap</tt>.
-- Ensure the Operations are colocated.
addSparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
addSparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
-- | Deprecated. Disallowed in GraphDef version &gt;= 2.
adjustContrast :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float
adjustContrast' :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float
-- | Adjust the contrast of one or more images.
--
-- <tt>images</tt> is a tensor of at least 3 dimensions. The last 3
-- dimensions are interpreted as `[height, width, channels]`. The other
-- dimensions only represent a collection of images, such as `[batch,
-- height, width, channels].`
--
-- Contrast is adjusted independently for each channel of each image.
--
-- For each channel, the Op first computes the mean of the image pixels
-- in the channel and then adjusts each component of each pixel to `(x -
-- mean) * contrast_factor + mean`.
adjustContrastv2 :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
adjustContrastv2' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
-- | Adjust the hue of one or more images.
--
-- <tt>images</tt> is a tensor of at least 3 dimensions. The last
-- dimension is interpreted as channels, and must be three.
--
-- The input image is considered in the RGB colorspace. Conceptually, the
-- RGB colors are first mapped into HSV. A delta is then applied to all
-- the hue values, and the result is mapped back to the RGB colorspace.
adjustHue :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
adjustHue' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
-- | Adjust the saturation of one or more images.
--
-- <tt>images</tt> is a tensor of at least 3 dimensions. The last
-- dimension is interpreted as channels, and must be three.
--
-- The input image is considered in the RGB colorspace. Conceptually, the
-- RGB colors are first mapped into HSV. A scale is then applied to all
-- the saturation values, and the result is mapped back to the RGB
-- colorspace.
adjustSaturation :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
adjustSaturation' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
-- | Computes the "logical and" of elements across dimensions of a tensor.
--
-- Reduces <tt>input</tt> along the dimensions given in
-- <tt>reduction_indices</tt>. Unless <tt>keep_dims</tt> is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- <tt>reduction_indices</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
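--
-- A minimal sketch (companion-package assumptions as in the <tt>abs</tt>
-- example above):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   -- AND each column of a 2x2 matrix by reducing along dimension 0.
--   let x = TF.constant (TF.Shape [2, 2]) [True, True, False, True]
--   v <- TF.runSession $ TF.run $
--          Ops.all x (TF.constant (TF.Shape [1]) [0 :: Int32])
--   print (v :: V.Vector Bool)  -- fromList [False,True]
-- ```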
all :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
all' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
-- | Generates labels for candidate sampling with a learned unigram
-- distribution.
--
-- See explanations of candidate sampling and the data formats at
-- go/candidate-sampling.
--
-- For each batch, this op picks a single set of sampled candidate
-- labels.
--
-- The advantages of sampling candidates per-batch are simplicity and the
-- possibility of efficient dense matrix multiplication. The disadvantage
-- is that the sampled candidates must be chosen independently of the
-- context and of the true labels.
allCandidateSampler :: Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
allCandidateSampler' :: OpParams -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
-- | Computes the "logical or" of elements across dimensions of a tensor.
--
-- Reduces <tt>input</tt> along the dimensions given in
-- <tt>reduction_indices</tt>. Unless <tt>keep_dims</tt> is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- <tt>reduction_indices</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
any :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
any' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
-- | Update '*var' according to the adadelta scheme.
--
-- accum = rho() * accum + (1 - rho()) * grad.square();
-- update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
-- update_accum = rho() * update_accum + (1 - rho()) * update.square();
-- var -= update;
applyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t)
applyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t)
-- | Update '*var' according to the adagrad scheme.
--
-- accum += grad * grad
-- var -= lr * grad * (1 / sqrt(accum))
applyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t)
applyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t)
-- | Update '*var' according to the proximal adagrad scheme.
applyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t)
applyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t)
-- | Update '*var' according to the Adam algorithm.
--
-- lr_t &lt;- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
-- m_t &lt;- beta1 * m_{t-1} + (1 - beta1) * g_t
-- v_t &lt;- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
-- variable &lt;- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
applyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t)
applyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t)
-- | Update '*var' according to the centered RMSProp algorithm.
--
-- The centered RMSProp algorithm uses an estimate of the centered second
-- moment (i.e., the variance) for normalization, as opposed to regular
-- RMSProp, which uses the (uncentered) second moment. This often helps
-- with training, but is slightly more expensive in terms of computation
-- and memory.
--
-- Note that in the dense implementation of this algorithm, mg, ms, and
-- mom will update even if the grad is zero, but in this sparse
-- implementation, mg, ms, and mom will not update in iterations during
-- which the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- mean_grad = decay * mean_grad + (1-decay) * gradient
--
-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon -
-- mean_grad ** 2)
--
-- mg &lt;- rho * mg_{t-1} + (1-rho) * grad
-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad
-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
-- var &lt;- var - mom
applyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
applyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
-- | Update '*var' according to the Ftrl-proximal scheme.
--
-- accum_new = accum + grad * grad
-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-- var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0
-- accum = accum_new
applyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
applyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
-- | Update '*var' by subtracting <tt>alpha</tt> * <tt>delta</tt> from it.
applyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t)
applyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Update '*var' according to the momentum scheme.
--
-- Set use_nesterov = True if you want to use Nesterov momentum.
--
-- accum = accum * momentum + grad
-- var -= lr * accum
applyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
applyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning
-- rate.
--
-- accum += grad * grad
-- prox_v = var - lr * grad * (1 / sqrt(accum))
-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
applyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t)
applyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t)
-- | Update '*var' using the FOBOS algorithm with a fixed learning rate.
--
-- prox_v = var - alpha * delta
-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
applyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
applyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
-- | Update '*var' according to the RMSProp algorithm.
--
-- Note that in the dense implementation of this algorithm, ms and mom
-- will update even if the grad is zero, but in this sparse
-- implementation, ms and mom will not update in iterations during which
-- the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
--
-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad
-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-- var &lt;- var - mom
applyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
applyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
-- | Returns the index with the largest value across dimensions of a
-- tensor.
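--
-- A minimal sketch (companion-package assumptions as in the <tt>abs</tt>
-- example above):
--
-- ```
-- import Data.Int (Int32, Int64)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   -- Index of the largest element in each row (reducing dimension 1).
--   let x = TF.constant (TF.Shape [2, 3]) [1, 9, 2, 8, 3, 7 :: Float]
--   v <- TF.runSession $ TF.run $
--          Ops.argMax x (TF.constant (TF.Shape []) [1 :: Int32])
--   print (v :: V.Vector Int64)  -- fromList [1,0]
-- ```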
argMax :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
argMax' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
-- | Returns the index with the smallest value across dimensions of a
-- tensor.
argMin :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
argMin' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
-- | Converts each entry in the given tensor to strings.
--
-- Supports many numeric types and boolean.
asString :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => Tensor v'1 t -> Tensor Build ByteString
asString' :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString
-- | Computes asin of x element-wise.
asin :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
asin' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Asserts that the given condition is true.
--
-- If <tt>condition</tt> evaluates to false, print the list of tensors in
-- `data`. <tt>summarize</tt> determines how many entries of the tensors
-- to print.
assert :: (MonadBuild m', TensorTypes t) => Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode)
assert' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode)
-- | Update <tt>ref</tt> by assigning <a>value</a> to it.
--
-- This operation outputs "ref" after the assignment is done. This makes
-- it easier to chain operations that need to use the reset value.
assign :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
assign' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
-- | Update <tt>ref</tt> by adding <a>value</a> to it.
--
-- This operation outputs "ref" after the update is done. This makes it
-- easier to chain operations that need to use the reset value.
assignAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
assignAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
-- | Adds a value to the current value of a variable.
--
-- Any ReadVariableOp which depends directly or indirectly on this assign
-- is guaranteed to see the incremented value or a subsequent newer one.
--
-- Outputs the incremented value, which can be used to totally order the
-- increments to this variable.
assignAddVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode)
assignAddVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode)
-- | Update <tt>ref</tt> by subtracting <a>value</a> from it.
--
-- This operation outputs "ref" after the update is done. This makes it
-- easier to chain operations that need to use the reset value.
assignSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
assignSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
-- | Assigns a new value to a variable.
--
-- Any ReadVariableOp with a control dependency on this op is guaranteed
-- to return this value or a subsequent newer value of the variable.
assignVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode)
assignVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode)
-- | Computes atan of x element-wise.
atan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
atan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Outputs a <tt>Summary</tt> protocol buffer with audio.
--
-- The summary has up to <tt>max_outputs</tt> summary values containing
-- audio. The audio is built from <tt>tensor</tt> which must be 3-D with
-- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size,
-- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]`
-- with a sample rate of <tt>sample_rate</tt>.
--
-- The <tt>tag</tt> argument is a scalar <a>Tensor</a> of type
-- <tt>string</tt>. It is used to build the <tt>tag</tt> of the summary
-- values:
--
-- <ul>
-- <li>If <tt>max_outputs</tt> is 1, the summary value tag is
-- '*tag*/audio'.</li>
-- <li>If <tt>max_outputs</tt> is greater than 1, the summary value tags
-- are generated sequentially as '*tag*/audio/0', '*tag*/audio/1',
-- etc.</li>
-- </ul>
audioSummary :: Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString
audioSummary' :: OpParams -> Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString
-- | Outputs a <tt>Summary</tt> protocol buffer with audio.
--
-- The summary has up to <tt>max_outputs</tt> summary values containing
-- audio. The audio is built from <tt>tensor</tt> which must be 3-D with
-- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size,
-- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]`
-- with a sample rate of <tt>sample_rate</tt>.
--
-- The <tt>tag</tt> argument is a scalar <a>Tensor</a> of type
-- <tt>string</tt>. It is used to build the <tt>tag</tt> of the summary
-- values:
--
-- <ul>
-- <li>If <tt>max_outputs</tt> is 1, the summary value tag is
-- '*tag*/audio'.</li>
-- <li>If <tt>max_outputs</tt> is greater than 1, the summary value tags
-- are generated sequentially as '*tag*/audio/0', '*tag*/audio/1',
-- etc.</li>
-- </ul>
audioSummaryV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString
audioSummaryV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString
-- | Performs average pooling on the input.
--
-- Each entry in <tt>output</tt> is the mean of the corresponding size
-- <tt>ksize</tt> window in <a>value</a>.
avgPool :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
avgPool' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Performs 3D average pooling on the input.
avgPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
avgPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes gradients of the average pooling function.
avgPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
avgPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
-- | Computes gradients of the average pooling function.
avgPoolGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
avgPoolGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
-- | Defines a barrier that persists across different graph executions.
--
-- A barrier represents a key-value map, where each key is a string, and
-- each value is a tuple of tensors.
--
-- At runtime, the barrier contains <tt>complete</tt> and
-- <tt>incomplete</tt> elements. A complete element has defined tensors
-- for all components of its value tuple, and may be accessed using
-- BarrierTakeMany. An incomplete element has some undefined components
-- in its value tuple, and may be updated using BarrierInsertMany.
barrier :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString)
barrier' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
-- | Closes the given barrier.
--
-- This operation signals that no more new elements will be inserted in
-- the given barrier. Subsequent InsertMany that try to introduce a new
-- key will fail. Subsequent InsertMany operations that just add missing
-- components to already existing elements will continue to succeed.
-- Subsequent TakeMany operations will continue to succeed if sufficient
-- completed elements remain in the barrier. Subsequent TakeMany
-- operations that would block will fail immediately.
barrierClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode)
barrierClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode)
-- | Computes the number of incomplete elements in the given barrier.
barrierIncompleteSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32)
barrierIncompleteSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
-- | For each key, assigns the respective value to the specified component.
--
-- If a key is not found in the barrier, this operation will create a new
-- incomplete element. If a key is found in the barrier, and the element
-- already has a value at component_index, this operation will fail with
-- INVALID_ARGUMENT, and leave the barrier in an undefined state.
barrierInsertMany :: (MonadBuild m', TensorType t) => Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode)
barrierInsertMany' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode)
-- | Computes the number of complete elements in the given barrier.
barrierReadySize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32)
barrierReadySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
-- | Takes the given number of completed elements from a barrier.
--
-- This operation concatenates completed-element component tensors along
-- the 0th dimension to make a single component tensor.
--
-- Elements come out of the barrier when they are complete, and in the
-- order in which they were placed into the barrier. The indices output
-- provides information about the batch in which each element was
-- originally inserted into the barrier.
barrierTakeMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types))
barrierTakeMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types))
batchCholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
batchCholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
batchCholeskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchCholeskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchIFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchIFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchIFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchIFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchIFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
batchIFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
-- | Multiplies slices of two tensors in batches.
--
-- Multiplies all slices of <a>Tensor</a> <tt>x</tt> and <tt>y</tt> (each
-- slice can be viewed as an element of a batch), and arranges the
-- individual results in a single output tensor of the same batch size.
-- Each of the individual slices can optionally be adjointed (to adjoint
-- a matrix means to transpose and conjugate it) before multiplication by
-- setting the <tt>adj_x</tt> or <tt>adj_y</tt> flag to <a>True</a>,
-- which are by default <a>False</a>.
--
-- The input tensors <tt>x</tt> and <tt>y</tt> are 3-D or higher with
-- shape `[..., r_x, c_x]` and `[..., r_y, c_y]`.
--
-- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`,
-- where:
--
-- r_o = c_x if adj_x else r_x
-- c_o = r_y if adj_y else c_y
--
-- It is computed as:
--
-- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
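--
-- A minimal sketch with a batch of one 2x2 matrix product
-- (companion-package assumptions as in the <tt>abs</tt> example above):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   let x = TF.constant (TF.Shape [1, 2, 2]) [1, 2, 3, 4 :: Float]
--       y = TF.constant (TF.Shape [1, 2, 2]) [5, 6, 7, 8]
--   v <- TF.runSession $ TF.run (Ops.batchMatMul x y)
--   -- [[1,2],[3,4]] x [[5,6],[7,8]] = [[19,22],[43,50]]
--   print (v :: V.Vector Float)  -- fromList [19.0,22.0,43.0,50.0]
-- ```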
batchMatMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchMatMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchMatrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
batchMatrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
batchMatrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
batchMatrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
batchMatrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t
batchMatrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
batchMatrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t
batchMatrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
batchMatrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
batchMatrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
batchMatrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchMatrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchMatrixSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchMatrixSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchMatrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
batchMatrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
batchMatrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
batchMatrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Batch normalization.
--
-- This op is deprecated. Prefer `tf.nn.batch_normalization`.
batchNormWithGlobalNormalization :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t
batchNormWithGlobalNormalization' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t
-- | Gradients for batch normalization.
--
-- This op is deprecated. See `tf.nn.batch_normalization`.
batchNormWithGlobalNormalizationGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
batchNormWithGlobalNormalizationGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
batchSelfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
batchSelfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
batchSelfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t)
batchSelfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t)
batchSvd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
batchSvd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
-- | BatchToSpace for 4-D tensors of type T.
--
-- This is a legacy version of the more general BatchToSpaceND.
--
-- Rearranges (permutes) data from batch into blocks of spatial data,
-- followed by cropping. This is the reverse transformation of
-- SpaceToBatch. More specifically, this op outputs a copy of the input
-- tensor where values from the <tt>batch</tt> dimension are moved in
-- spatial blocks to the <tt>height</tt> and <tt>width</tt> dimensions,
-- followed by cropping along the <tt>height</tt> and <tt>width</tt>
-- dimensions.
batchToSpace :: (TensorType t, OneOf '[Int32, Int64] tidx) => Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
batchToSpace' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | BatchToSpace for N-D tensors of type T.
--
-- This operation reshapes the "batch" dimension 0 into `M + 1`
-- dimensions of shape `block_shape + [batch]`, interleaves these blocks
-- back into the grid defined by the spatial dimensions `[1, ..., M]`, to
-- obtain a result with the same rank as the input. The spatial
-- dimensions of this intermediate result are then optionally cropped
-- according to <tt>crops</tt> to produce the output. This is the reverse
-- of SpaceToBatch. See below for a precise description.
batchToSpaceND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t
batchToSpaceND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t
-- | Compute the regularized incomplete beta integral \(I_x(a, b)\).
--
-- The regularized incomplete beta integral is defined as:
--
-- \(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\)
--
-- where
--
-- \(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\)
--
-- is the incomplete beta function and \(B(a, b)\) is the *complete* beta
-- function.
betainc :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
betainc' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Adds <tt>bias</tt> to <a>value</a>.
--
-- This is a special case of `tf.add` where <tt>bias</tt> is restricted
-- to be 1-D. Broadcasting is supported, so <a>value</a> may have any
-- number of dimensions.
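--
-- A minimal sketch (companion-package assumptions as in the <tt>abs</tt>
-- example above):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   -- The 1-D bias is added along the last (channel) dimension.
--   let value = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--       bias  = TF.constant (TF.Shape [2]) [10, 20]
--   v <- TF.runSession $ TF.run (Ops.biasAdd value bias)
--   print (v :: V.Vector Float)  -- fromList [11.0,22.0,13.0,24.0]
-- ```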
biasAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
biasAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | The backward operation for <a>BiasAdd</a> on the "bias" tensor.
--
-- It accumulates all the values from out_backprop into the feature
-- dimension. For NHWC data format, the feature dimension is the last.
-- For NCHW data format, the feature dimension is the third-to-last.
biasAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
biasAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Adds <tt>bias</tt> to <a>value</a>.
--
-- This is a deprecated version of BiasAdd and will soon be removed.
--
-- This is a special case of `tf.add` where <tt>bias</tt> is restricted
-- to be 1-D. Broadcasting is supported, so <a>value</a> may have any
-- number of dimensions.
biasAddV1 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
biasAddV1' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Bitcasts a tensor from one type to another without copying data.
--
-- Given a tensor <tt>input</tt>, this operation returns a tensor that
-- has the same buffer data as <tt>input</tt> with datatype `type`.
--
-- If the input datatype <tt>T</tt> is larger than the output datatype
-- `type` then the shape changes from [...] to [...,
-- sizeof(<tt>T</tt>)/sizeof(`type`)].
--
-- If <tt>T</tt> is smaller than `type`, the operator requires that the
-- rightmost dimension be equal to sizeof(`type`)/sizeof(<tt>T</tt>). The
-- shape then goes from [..., sizeof(`type`)/sizeof(<tt>T</tt>)] to
-- [...].
--
-- *NOTE*: Bitcast is implemented as a low-level cast, so machines with
-- different endian orderings will give different results.
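--
-- A minimal sketch reinterpreting the bytes of a <tt>Float</tt>
-- (companion-package assumptions as in the <tt>abs</tt> example above;
-- the printed bytes depend on the machine's endianness):
--
-- ```
-- import Data.Word (Word8)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   -- Float -> Word8 grows the shape from [1] to [1, 4].
--   let x = TF.constant (TF.Shape [1]) [1 :: Float]
--   v <- TF.runSession $ TF.run (Ops.bitcast x)
--   print (v :: V.Vector Word8)  -- little-endian: fromList [0,0,128,63]
-- ```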
bitcast :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => Tensor v'1 t -> Tensor Build type'
bitcast' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => OpParams -> Tensor v'1 t -> Tensor Build type'
-- | Return the shape of s0 op s1 with broadcast.
--
-- Given <tt>s0</tt> and <tt>s1</tt>, tensors that represent shapes,
-- compute <tt>r0</tt>, the broadcasted shape. <tt>s0</tt>, <tt>s1</tt>
-- and <tt>r0</tt> are all integer vectors.
broadcastArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
broadcastArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Return the reduction indices for computing gradients of s0 op s1 with
-- broadcast.
--
-- This is typically used by gradient computations for a broadcasting
-- operation.
broadcastGradientArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
broadcastGradientArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
-- | Performs beam search decoding on the logits given in input.
--
-- A note about the attribute merge_repeated: For the beam search
-- decoder, this means that if consecutive entries in a beam are the
-- same, only the first of these is emitted. That is, when the top path
-- is "A B B B B", "A B" is returned if merge_repeated = True but "A B B
-- B B" is returned if merge_repeated = False.
cTCBeamSearchDecoder :: Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)
cTCBeamSearchDecoder' :: OpParams -> Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)
-- | Performs greedy decoding on the logits given in inputs.
--
-- A note about the attribute merge_repeated: if enabled, when
-- consecutive logits' maximum indices are the same, only the first of
-- these is emitted. Labeling the blank <a>*</a>, the sequence "A B B * B
-- B" becomes "A B" if merge_repeated = True and "A B B B B" if
-- merge_repeated = False.
--
-- Regardless of the value of merge_repeated, if the maximum index of a
-- given time and batch corresponds to the blank, index `(num_classes -
-- 1)`, no new element is emitted.
cTCGreedyDecoder :: Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)
cTCGreedyDecoder' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)
-- | Calculates the CTC Loss (log probability) for each batch entry. Also
-- calculates the gradient.
--
-- This op performs the softmax operation for you, so inputs should be
-- e.g. linear projections of outputs by an LSTM.
cTCLoss :: Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float)
cTCLoss' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float)
-- | Cast x of type SrcT to y of DstT.
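--
-- A minimal sketch (companion-package assumptions as in the <tt>abs</tt>
-- example above); the annotation on the fetched result picks DstT:
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   v <- TF.runSession $ TF.run $
--          Ops.cast (TF.constant (TF.Shape [3]) [1, 2, 3 :: Int32])
--   print (v :: V.Vector Float)  -- fromList [1.0,2.0,3.0]
-- ```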
cast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT
cast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT
-- | Returns element-wise smallest integer not less than x.
ceil :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
ceil' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Checks a tensor for NaN and Inf values.
--
-- When run, reports an <tt>InvalidArgument</tt> error if <tt>tensor</tt>
-- has any values that are not a number (NaN) or infinity (Inf).
-- Otherwise, passes <tt>tensor</tt> as-is.
checkNumerics :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
checkNumerics' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the Cholesky decomposition of one or more square matrices.
--
-- The input is a tensor of shape `[..., M, M]` whose inner-most 2
-- dimensions form square matrices, with the same constraints as the
-- single matrix Cholesky decomposition above. The output is a tensor of
-- the same shape as the input containing the Cholesky decompositions for
-- all input submatrices `[..., :, :]`.
cholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
cholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the reverse mode backpropagated gradient of the Cholesky
-- algorithm.
--
-- For an explanation see "Differentiation of the Cholesky algorithm" by
-- Iain Murray <a>http://arxiv.org/abs/1602.07527</a>.
choleskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
choleskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Converts two real numbers to a complex number.
--
-- Given a tensor <a>real</a> representing the real part of a complex
-- number, and a tensor <a>imag</a> representing the imaginary part of a
-- complex number, this operation returns complex numbers elementwise of
-- the form \(a + bj\), where *a* represents the <a>real</a> part and *b*
-- represents the <a>imag</a> part.
--
-- The input tensors <a>real</a> and <a>imag</a> must have the same
-- shape.
--
-- For example:
--
-- ```
-- # tensor <a>real</a> is [2.25, 3.25]
-- # tensor <a>imag</a> is [4.75, 5.75]
-- tf.complex(real, imag) ==&gt; [[2.25 + 4.75j], [3.25 + 5.75j]]
-- ```
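--
-- The same computation as a minimal Haskell sketch (companion-package
-- assumptions as in the <tt>abs</tt> example above):
--
-- ```
-- import Data.Complex (Complex)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   let re = TF.constant (TF.Shape [2]) [2.25, 3.25 :: Float]
--       im = TF.constant (TF.Shape [2]) [4.75, 5.75]
--   v <- TF.runSession $ TF.run (Ops.complex re im)
--   print (v :: V.Vector (Complex Float))  -- fromList [2.25 :+ 4.75,3.25 :+ 5.75]
-- ```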
complex :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout
complex' :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout
-- | Computes the complex absolute value of a tensor.
--
-- Given a tensor <tt>x</tt> of complex numbers, this operation returns a
-- tensor of type <tt>float</tt> or <tt>double</tt> that is the absolute
-- value of each element in <tt>x</tt>. All elements in <tt>x</tt> must
-- be complex numbers of the form \(a + bj\). The absolute value is
-- computed as \(\sqrt{a^2 + b^2}\).
complexAbs :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout
complexAbs' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout
-- | Computes the ids of the positions in sampled_candidates that match
-- true_labels.
--
-- When doing log-odds NCE, the result of this op should be passed
-- through a SparseToDense op, then added to the logits of the sampled
-- candidates. This has the effect of <tt>removing</tt> the sampled
-- labels that match the true labels by making the classifier sure that
-- they are sampled labels.
computeAccidentalHits :: Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)
computeAccidentalHits' :: OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)
-- | Concatenates tensors along one dimension.
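--
-- A minimal sketch concatenating two vectors along dimension 0
-- (companion-package assumptions as in the <tt>abs</tt> example above):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   let a = TF.constant (TF.Shape [2]) [1, 2 :: Float]
--       b = TF.constant (TF.Shape [3]) [3, 4, 5]
--   v <- TF.runSession $ TF.run $
--          Ops.concat (TF.constant (TF.Shape []) [0 :: Int32]) [a, b]
--   print (v :: V.Vector Float)  -- fromList [1.0,2.0,3.0,4.0,5.0]
-- ```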
concat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t
concat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t
-- | Computes offsets of concat inputs within its output.
--
-- For example:
--
-- ```prettyprint
-- # <tt>x</tt> is [2, 2, 7]
-- # <tt>y</tt> is [2, 3, 7]
-- # <tt>z</tt> is [2, 5, 7]
-- concat_offset(2, [x, y, z]) =&gt; [0, 0, 0], [0, 2, 0], [0, 5, 0]
-- ```
concatOffset :: Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32]
concatOffset' :: OpParams -> Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32]
-- | Concatenates tensors along one dimension.
concatV2 :: (TensorType t, OneOf '[Int32, Int64] tidx) => [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t
concatV2' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t
-- | A conditional accumulator for aggregating gradients.
--
-- The accumulator accepts gradients marked with local_step greater than
-- or equal to the most recent global_step known to the accumulator. The
-- average can be extracted from the accumulator, provided sufficient
-- gradients have been accumulated. Extracting the average automatically
-- resets the aggregate to 0, and increments the global_step recorded by
-- the accumulator.
conditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString)
conditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString)
-- | Returns the complex conjugate of a complex number.
--
-- Given a tensor <tt>input</tt> of complex numbers, this operation
-- returns a tensor of complex numbers that are the complex conjugate of
-- each element in <tt>input</tt>. The complex numbers in <tt>input</tt>
-- must be of the form \(a + bj\), where *a* is the real part and *b* is
-- the imaginary part.
--
-- The complex conjugate returned by this operation is of the form \(a -
-- bj\).
--
-- For example:
--
-- ```
-- # tensor <tt>input</tt> is [-2.25 + 4.75j, 3.25 + 5.75j]
-- tf.conj(input) ==&gt; [-2.25 - 4.75j, 3.25 - 5.75j]
-- ```
conj :: (OneOf '[Complex Double, Complex Float] t) => Tensor v'1 t -> Tensor Build t
conj' :: (OneOf '[Complex Double, Complex Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns a constant tensor.
const :: (TensorType dtype) => Tensor Build dtype
const' :: (TensorType dtype) => OpParams -> Tensor Build dtype
-- | Does nothing. Serves as a control trigger for scheduling.
--
-- Only useful as a placeholder for control edges.
controlTrigger :: (MonadBuild m') => m' (ControlNode)
controlTrigger' :: (MonadBuild m') => OpParams -> m' (ControlNode)
-- | Computes a 2-D convolution given 4-D <tt>input</tt> and <a>filter</a>
-- tensors.
--
-- Given an input tensor of shape `[batch, in_height, in_width,
-- in_channels]` and a filter / kernel tensor of shape `[filter_height,
-- filter_width, in_channels, out_channels]`, this op performs the
-- following:
--
-- <ol>
-- <li>Flattens the filter to a 2-D matrix with shape `[filter_height *
-- filter_width * in_channels, output_channels]`.</li>
-- <li>Extracts image patches from the input tensor to form a *virtual*
-- tensor of shape `[batch, out_height, out_width, filter_height *
-- filter_width * in_channels]`.</li>
-- <li>For each patch, right-multiplies the filter matrix and the image
-- patch vector.</li>
-- </ol>
--
-- In detail, with the default NHWC format,
--
-- output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di,
-- strides[2] * j + dj, q] * filter[di, dj, q, k]
--
-- Must have `strides[0] = strides[3] = 1`. For the most common case of
-- the same horizontal and vertical strides, `strides = [1, stride,
-- stride, 1]`.
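--
-- <tt>strides</tt> and <tt>padding</tt> are required attributes of this
-- op, so they must be supplied through <tt>OpParams</tt> via the primed
-- variant. A minimal sketch, assuming <tt>opAttr</tt> from the companion
-- <tt>tensorflow</tt> package and <tt>(.~)</tt> from lens-family, on top
-- of the usual companion-package assumptions from the <tt>abs</tt>
-- example above:
--
-- ```
-- {-# LANGUAGE OverloadedStrings #-}
-- import Data.ByteString (ByteString)
-- import Data.Int (Int64)
-- import Lens.Family2 ((.~))
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as Ops
-- import qualified TensorFlow.Ops as TF (constant)
--
-- main :: IO ()
-- main = do
--   -- A 1x1 one-channel image convolved with a 1x1 filter.
--   let image  = TF.constant (TF.Shape [1, 1, 1, 1]) [2 :: Float]
--       filt   = TF.constant (TF.Shape [1, 1, 1, 1]) [3 :: Float]
--       params = (TF.opAttr "strides" .~ [1, 1, 1, 1 :: Int64])
--              . (TF.opAttr "padding" .~ ("VALID" :: ByteString))
--   v <- TF.runSession $ TF.run (Ops.conv2D' params image filt)
--   print (v :: V.Vector Float)  -- fromList [6.0]
-- ```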
conv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
conv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the gradients of convolution with respect to the filter.
conv2DBackpropFilter :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
conv2DBackpropFilter' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
-- | Computes the gradients of convolution with respect to the input.
conv2DBackpropInput :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
conv2DBackpropInput' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Computes a 3-D convolution given 5-D <tt>input</tt> and <a>filter</a>
-- tensors.
--
-- In signal processing, cross-correlation is a measure of similarity of
-- two waveforms as a function of a time-lag applied to one of them. This
-- is also known as a sliding dot product or sliding inner-product.
--
-- Our Conv3D implements a form of cross-correlation.
conv3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
conv3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the gradients of 3-D convolution with respect to the filter.
conv3DBackpropFilter :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
conv3DBackpropFilter' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Computes the gradients of 3-D convolution with respect to the filter.
conv3DBackpropFilterV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
conv3DBackpropFilterV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
-- | Computes the gradients of 3-D convolution with respect to the input.
conv3DBackpropInput :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
conv3DBackpropInput' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Computes the gradients of 3-D convolution with respect to the input.
conv3DBackpropInputV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
conv3DBackpropInputV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Copy Op.
--
-- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on
-- the device on which the tensor is allocated.
--
-- Unlike the CopyHost Op, this op does not have HostMemory constraint on
-- its input or output.
copy :: (TensorType t) => Tensor v'1 t -> Tensor Build t
copy' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Copy Host Op.
--
-- Performs CPU-to-CPU deep-copying of tensor.
--
-- Unlike the Copy Op, this op has HostMemory constraint on its input or
-- output.
copyHost :: (TensorType t) => Tensor v'1 t -> Tensor Build t
copyHost' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes cos of x element-wise.
cos :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
cos' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Increments <tt>ref</tt> until it reaches <tt>limit</tt>.
countUpTo :: (MonadBuild m', OneOf '[Int32, Int64] t) => Int64 -> Tensor Ref t -> m' (Tensor Value t)
countUpTo' :: (MonadBuild m', OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor Ref t -> m' (Tensor Value t)
-- | Extracts crops from the input image tensor and bilinearly resizes them
-- (possibly
--
-- with aspect ratio change) to a common output size specified by
-- <tt>crop_size</tt>. This is more general than the
-- <tt>crop_to_bounding_box</tt> op which extracts a fixed size slice
-- from the input image and does not allow resizing or aspect ratio
-- change.
--
-- Returns a tensor with <tt>crops</tt> from the input <tt>image</tt> at
-- positions defined at the bounding box locations in <tt>boxes</tt>. The
-- cropped boxes are all resized (with bilinear interpolation) to a fixed
-- `size = [crop_height, crop_width]`. The result is a 4-D tensor
-- `[num_boxes, crop_height, crop_width, depth]`.
cropAndResize :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float
cropAndResize' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float
-- | Computes the gradient of the crop_and_resize op wrt the input boxes
-- tensor.
cropAndResizeGradBoxes :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float
cropAndResizeGradBoxes' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float
-- | Computes the gradient of the crop_and_resize op wrt the input image
-- tensor.
cropAndResizeGradImage :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
cropAndResizeGradImage' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
-- | Compute the pairwise cross product.
--
-- <tt>a</tt> and <tt>b</tt> must be the same shape; they can either be
-- simple 3-element vectors, or any shape where the innermost dimension
-- is 3. In the latter case, each pair of corresponding 3-element vectors
-- is cross-multiplied independently.
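--
-- A runnable sketch (illustrative; assumes <tt>vector</tt> from
-- <tt>TensorFlow.Ops</tt> and the session API from <tt>TensorFlow.Core</tt>):
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- Cross product of the x and y unit vectors; expected value [0, 0, 1].
--     result <- TF.runSession $ TF.run $
--         TFC.cross (TF.vector [1, 0, 0 :: Float]) (TF.vector [0, 1, 0])
--     print (result :: V.Vector Float)
-- ```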
cross :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
cross' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Compute the cumulative product of the tensor <tt>x</tt> along
-- <tt>axis</tt>.
--
-- By default, this op performs an inclusive cumprod, which means that
-- the first element of the input is identical to the first element of
-- the output: ```prettyprint tf.cumprod([a, b, c]) ==&gt; [a, a * b, a *
-- b * c] ```
--
-- By setting the <tt>exclusive</tt> kwarg to <a>True</a>, an exclusive
-- cumprod is performed instead: ```prettyprint tf.cumprod([a, b, c],
-- exclusive=True) ==&gt; [0, a, a * b] ```
--
-- By setting the <a>reverse</a> kwarg to <a>True</a>, the cumprod is
-- performed in the opposite direction: ```prettyprint tf.cumprod([a, b,
-- c], reverse=True) ==&gt; [a * b * c, b * c, c] ``` This is more
-- efficient than using separate `tf.reverse` ops.
--
-- The <a>reverse</a> and <tt>exclusive</tt> kwargs can also be combined:
-- ```prettyprint tf.cumprod([a, b, c], exclusive=True, reverse=True)
-- ==&gt; [b * c, c, 0] ```
cumprod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
cumprod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | Compute the cumulative sum of the tensor <tt>x</tt> along
-- <tt>axis</tt>.
--
-- By default, this op performs an inclusive cumsum, which means that the
-- first element of the input is identical to the first element of the
-- output: ```prettyprint tf.cumsum([a, b, c]) ==&gt; [a, a + b, a + b +
-- c] ```
--
-- By setting the <tt>exclusive</tt> kwarg to <a>True</a>, an exclusive
-- cumsum is performed instead: ```prettyprint tf.cumsum([a, b, c],
-- exclusive=True) ==&gt; [0, a, a + b] ```
--
-- By setting the <a>reverse</a> kwarg to <a>True</a>, the cumsum is
-- performed in the opposite direction: ```prettyprint tf.cumsum([a, b,
-- c], reverse=True) ==&gt; [a + b + c, b + c, c] ``` This is more
-- efficient than using separate `tf.reverse` ops.
--
-- The <a>reverse</a> and <tt>exclusive</tt> kwargs can also be combined:
-- ```prettyprint tf.cumsum([a, b, c], exclusive=True, reverse=True)
-- ==&gt; [b + c, c, 0] ```
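--
-- A runnable sketch of the inclusive (default) case (illustrative; assumes
-- <tt>vector</tt> and <tt>scalar</tt> from <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (scalar, vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- Cumulative sum along axis 0; expected value [1, 3, 6].
--     result <- TF.runSession $ TF.run $
--         TFC.cumsum (TF.vector [1, 2, 3 :: Float]) (TF.scalar (0 :: Int32))
--     print (result :: V.Vector Float)
-- ```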
cumsum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
cumsum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | Debug Identity Op.
--
-- Provides an identity mapping of the non-Ref type input tensor for
-- debugging.
debugIdentity :: (TensorType t) => Tensor v'1 t -> Tensor Build t
debugIdentity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Debug NaN Value Counter Op
--
-- Counts number of NaNs in the input tensor, for debugging.
debugNanCount :: (TensorType t) => Tensor v'1 t -> Tensor Build Int64
debugNanCount' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int64
-- | Debug Numeric Summary Op.
--
-- Provide a basic summary of numeric value types, range and
-- distribution.
debugNumericSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build Double
debugNumericSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Double
-- | Decode web-safe base64-encoded strings.
--
-- Input may or may not have padding at the end. See EncodeBase64 for
-- padding. Web-safe means that input must use - and _ instead of + and
-- /.
decodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString
decodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
-- | Convert CSV records to tensors. Each column maps to one tensor.
--
-- RFC 4180 format is expected for the CSV records.
-- (https://tools.ietf.org/html/rfc4180) Note that we allow leading and
-- trailing spaces with int or float fields.
decodeCSV :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE
decodeCSV' :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => OpParams -> Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE
-- | Decode the first frame of a GIF-encoded image to a uint8 tensor.
--
-- GIFs with frame or transparency compression are not supported. Convert
-- an animated GIF from compressed to uncompressed with:
--
-- convert $src.gif -coalesce $dst.gif
decodeGif :: Tensor v'1 ByteString -> Tensor Build Word8
decodeGif' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8
-- | Convert JSON-encoded Example records to binary protocol buffer
-- strings.
--
-- This op translates a tensor containing Example records, encoded using
-- the <a>standard JSON mapping</a>, into a tensor containing the same
-- records encoded as binary protocol buffers. The resulting tensor can
-- then be fed to any of the other Example-parsing ops.
decodeJSONExample :: Tensor v'1 ByteString -> Tensor Build ByteString
decodeJSONExample' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
-- | Decode a JPEG-encoded image to a uint8 tensor.
--
-- The attr <tt>channels</tt> indicates the desired number of color
-- channels for the decoded image.
--
-- Accepted values are:
--
-- <ul>
-- <li>0: Use the number of channels in the JPEG-encoded image.</li>
-- <li>1: output a grayscale image.</li>
-- <li>3: output an RGB image.</li>
-- </ul>
--
-- If needed, the JPEG-encoded image is transformed to match the
-- requested number of color channels.
--
-- The attr <tt>ratio</tt> allows downscaling the image by an integer
-- factor during decoding. Allowed values are: 1, 2, 4, and 8. This is
-- much faster than downscaling the image later.
decodeJpeg :: Tensor v'1 ByteString -> Tensor Build Word8
decodeJpeg' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8
-- | Decode a PNG-encoded image to a uint8 or uint16 tensor.
--
-- The attr <tt>channels</tt> indicates the desired number of color
-- channels for the decoded image.
--
-- Accepted values are:
--
-- <ul>
-- <li>0: Use the number of channels in the PNG-encoded image.</li>
-- <li>1: output a grayscale image.</li>
-- <li>3: output an RGB image.</li>
-- <li>4: output an RGBA image.</li>
-- </ul>
--
-- If needed, the PNG-encoded image is transformed to match the requested
-- number of color channels.
decodePng :: (OneOf '[Word16, Word8] dtype) => Tensor v'1 ByteString -> Tensor Build dtype
decodePng' :: (OneOf '[Word16, Word8] dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype
-- | Reinterpret the bytes of a string as a vector of numbers.
decodeRaw :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type
decodeRaw' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type
-- | Delete the tensor specified by its handle in the session.
deleteSessionTensor :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode)
deleteSessionTensor' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode)
-- | Applies set operation along last dimension of 2 <a>Tensor</a> inputs.
--
-- See SetOperationOp::SetOperationFromContext for values of
-- <tt>set_operation</tt>.
--
-- Output <tt>result</tt> is a <tt>SparseTensor</tt> represented by
-- <tt>result_indices</tt>, <tt>result_values</tt>, and
-- <tt>result_shape</tt>. For <tt>set1</tt> and <tt>set2</tt> ranked
-- <tt>n</tt>, this has rank <tt>n</tt> and the same 1st `n-1` dimensions
-- as <tt>set1</tt> and <tt>set2</tt>. The <tt>nth</tt> dimension
-- contains the result of <tt>set_operation</tt> applied to the
-- corresponding `[0...n-1]` dimension of <tt>set</tt>.
denseToDenseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
denseToDenseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
-- | Applies set operation along last dimension of <a>Tensor</a> and
-- <tt>SparseTensor</tt>.
--
-- See SetOperationOp::SetOperationFromContext for values of
-- <tt>set_operation</tt>.
--
-- Input <tt>set2</tt> is a <tt>SparseTensor</tt> represented by
-- <tt>set2_indices</tt>, <tt>set2_values</tt>, and <tt>set2_shape</tt>.
-- For <tt>set2</tt> ranked <tt>n</tt>, 1st `n-1` dimensions must be the
-- same as <tt>set1</tt>. Dimension <tt>n</tt> contains values in a set,
-- duplicates are allowed but ignored.
--
-- If <tt>validate_indices</tt> is <a>True</a>, this op validates the
-- order and range of <tt>set2</tt> indices.
--
-- Output <tt>result</tt> is a <tt>SparseTensor</tt> represented by
-- <tt>result_indices</tt>, <tt>result_values</tt>, and
-- <tt>result_shape</tt>. For <tt>set1</tt> and <tt>set2</tt> ranked
-- <tt>n</tt>, this has rank <tt>n</tt> and the same 1st `n-1` dimensions
-- as <tt>set1</tt> and <tt>set2</tt>. The <tt>nth</tt> dimension
-- contains the result of <tt>set_operation</tt> applied to the
-- corresponding `[0...n-1]` dimension of <tt>set</tt>.
denseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
denseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
-- | DepthToSpace for tensors of type T.
--
-- Rearranges data from depth into blocks of spatial data. This is the
-- reverse transformation of SpaceToDepth. More specifically, this op
-- outputs a copy of the input tensor where values from the
-- <tt>depth</tt> dimension are moved in spatial blocks to the
-- <tt>height</tt> and <tt>width</tt> dimensions. The attr
-- <tt>block_size</tt> indicates the input block size and how the data is
-- moved.
--
-- <ul>
-- <li>Chunks of data of size `block_size * block_size` from depth are
-- rearranged into non-overlapping blocks of size `block_size x
-- block_size`</li>
-- <li>The width of the output tensor is `input_width * block_size`,
-- whereas the height is `input_height * block_size`.</li>
-- <li>The depth of the input tensor must be divisible by `block_size *
-- block_size`.</li>
-- </ul>
--
-- That is, assuming the input is in the shape: `[batch, height, width,
-- depth]`, the shape of the output will be: `[batch, height*block_size,
-- width*block_size, depth/(block_size*block_size)]`
--
-- This operation requires that the input tensor be of rank 4, and that
-- <tt>block_size</tt> be &gt;=1 and that `block_size * block_size` be a
-- divisor of the input depth.
--
-- This operation is useful for resizing the activations between
-- convolutions (but keeping all data), e.g. instead of pooling. It is
-- also useful for training purely convolutional models.
--
-- For example, given this input of shape `[1, 1, 1, 4]`, and a block
-- size of 2:
--
-- ```prettyprint x = [[[[1, 2, 3, 4]]]]
--
-- ```
--
-- This operation will output a tensor of shape `[1, 2, 2, 1]`:
--
-- ```prettyprint [[[[1], [2]], [[3], [4]]]] ```
--
-- Here, the input has a batch of 1 and each batch element has shape `[1,
-- 1, 4]`, the corresponding output will have 2x2 elements and will have
-- a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output
-- element shape is `[2, 2, 1]`.
--
-- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`,
-- e.g.
--
-- ```prettyprint x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ```
--
-- This operation, for block size of 2, will return the following tensor
-- of shape `[1, 2, 2, 3]`
--
-- ```prettyprint [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
--
-- ```
--
-- Similarly, for the following input of shape `[1 2 2 4]`, and a block
-- size of 2:
--
-- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
-- [13, 14, 15, 16]]]] ```
--
-- the operator will return the following tensor of shape `[1 4 4 1]`:
--
-- ```prettyprint x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [
-- [9], [10], [13], [14]], [ [11], [12], [15], [16]]]
--
-- ```
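--
-- A runnable sketch of the first example above (illustrative; assumes
-- <tt>constant</tt> from <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (constant)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- A [1, 1, 1, 4] input with block_size 2 becomes shape [1, 2, 2, 1];
--     -- fetched as a flat vector the values are unchanged: [1, 2, 3, 4].
--     result <- TF.runSession $ TF.run $
--         TFC.depthToSpace 2 (TF.constant (TF.Shape [1, 1, 1, 4]) [1, 2, 3, 4 :: Float])
--     print (result :: V.Vector Float)
-- ```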
depthToSpace :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t
depthToSpace' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t
-- | Computes a 2-D depthwise convolution given 4-D <tt>input</tt> and
-- <a>filter</a> tensors.
--
-- Given an input tensor of shape `[batch, in_height, in_width,
-- in_channels]` and a filter / kernel tensor of shape `[filter_height,
-- filter_width, in_channels, channel_multiplier]`, containing
-- <tt>in_channels</tt> convolutional filters of depth 1,
-- <tt>depthwise_conv2d</tt> applies a different filter to each input
-- channel (expanding from 1 channel to <tt>channel_multiplier</tt>
-- channels for each), then concatenates the results together. Thus, the
-- output has `in_channels * channel_multiplier` channels.
--
-- for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b,
-- i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] *
-- i + di, strides[2] * j + dj, k] * filter[di, dj, k, q]
--
-- Must have `strides[0] = strides[3] = 1`. For the most common case of
-- the same horizontal and vertical strides, `strides = [1, stride,
-- stride, 1]`.
depthwiseConv2dNative :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
depthwiseConv2dNative' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the gradients of depthwise convolution with respect to the
-- filter.
depthwiseConv2dNativeBackpropFilter :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
depthwiseConv2dNativeBackpropFilter' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
-- | Computes the gradients of depthwise convolution with respect to the
-- input.
depthwiseConv2dNativeBackpropInput :: (OneOf '[Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
depthwiseConv2dNativeBackpropInput' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Dequantize the <tt>input</tt> tensor into a float Tensor.
--
-- <ul>
-- <li><i>min_range, max_range</i> are scalar floats that specify the
-- range for the <tt>input</tt> data. The <tt>mode</tt> attribute
-- controls exactly which calculations are used to convert the float
-- values to their quantized equivalents.</li>
-- </ul>
--
-- In <tt>MIN_COMBINED</tt> mode, each value of the tensor will undergo
-- the following:
--
-- ``` if T == qint8, in[i] += (range(T) + 1)/ 2.0 out[i] = min_range +
-- (in[i] * (max_range - min_range) / range(T)) ``` here `range(T) =
-- numeric_limits&lt;T&gt;::max() - numeric_limits&lt;T&gt;::min()`
--
-- *MIN_COMBINED Mode Example*
--
-- If the input comes from a QuantizedRelu6, the output type is quint8
-- (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The
-- min_range and max_range values are therefore 0.0 and 6.0. Dequantize
-- on quint8 will take each value, cast to float, and multiply by 6 /
-- 255. Note that if the quantized type is qint8, the operation will
-- additionally add 128 to each value before casting.
--
-- If the mode is <tt>MIN_FIRST</tt>, then this approach is used:
--
-- ``` number_of_steps = 1 &lt;&lt; (# of bits in T) range_adjust =
-- number_of_steps / (number_of_steps - 1) range = (range_max -
-- range_min) * range_adjust range_scale = range / number_of_steps const
-- double offset_input = static_cast&lt;double&gt;(input) -
-- lowest_quantized; result = range_min + ((input -
-- numeric_limits&lt;T&gt;::min()) * range_scale) ```
dequantize :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
dequantize' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
-- | Deserialize and concatenate <tt>SparseTensors</tt> from a serialized
-- minibatch.
--
-- The input <tt>serialized_sparse</tt> must be a string matrix of shape
-- `[N x 3]` where <tt>N</tt> is the minibatch size and the rows
-- correspond to packed outputs of <tt>SerializeSparse</tt>. The ranks of
-- the original <tt>SparseTensor</tt> objects must all match. When the
-- final <tt>SparseTensor</tt> is created, it has rank one higher than
-- the ranks of the incoming <tt>SparseTensor</tt> objects (they have
-- been concatenated along a new row dimension).
--
-- The output <tt>SparseTensor</tt> object's shape values for all
-- dimensions but the first are the max across the input
-- <tt>SparseTensor</tt> objects' shape values for the corresponding
-- dimensions. Its first shape value is <tt>N</tt>, the minibatch size.
--
-- The input <tt>SparseTensor</tt> objects' indices are assumed ordered
-- in standard lexicographic order. If this is not the case, after this
-- step run <tt>SparseReorder</tt> to restore index ordering.
--
-- For example, if the serialized input is a `[2 x 3]` matrix
-- representing two original <tt>SparseTensor</tt> objects:
--
-- index = [ 0] [10] [20] values = [1, 2, 3] shape = [50]
--
-- and
--
-- index = [ 2] [10] values = [4, 5] shape = [30]
--
-- then the final deserialized <tt>SparseTensor</tt> will be:
--
-- index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5]
-- shape = [2 50]
deserializeManySparse :: (TensorType dtype) => Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)
deserializeManySparse' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)
-- | Destroys the temporary variable and returns its final value.
--
-- Sets output to the value of the Tensor pointed to by <tt>ref</tt>,
-- then destroys the temporary variable called <tt>var_name</tt>. All
-- other uses of <tt>ref</tt> *must* have executed before this op. This
-- is typically achieved by chaining the ref through each assign op, or
-- by using control dependencies.
--
-- Outputs the final value of the tensor pointed to by <tt>ref</tt>.
destroyTemporaryVariable :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Value t)
destroyTemporaryVariable' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Value t)
-- | Returns a diagonal tensor with given diagonal values.
--
-- Given a <tt>diagonal</tt>, this operation returns a tensor with the
-- <tt>diagonal</tt> and everything else padded with zeros. The diagonal
-- is computed as follows:
--
-- Assume <tt>diagonal</tt> has dimensions [D1,..., Dk], then the output
-- is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
--
-- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0
-- everywhere else.
--
-- For example:
--
-- ```prettyprint # <tt>diagonal</tt> is [1, 2, 3, 4] tf.diag(diagonal)
-- ==&gt; [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] ```
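--
-- A runnable sketch of the example above (illustrative; assumes
-- <tt>vector</tt> from <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- A 4x4 diagonal matrix, fetched as a flat vector of 16 elements.
--     result <- TF.runSession $ TF.run $ TFC.diag (TF.vector [1, 2, 3, 4 :: Float])
--     print (result :: V.Vector Float)
-- ```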
diag :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t
diag' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns the diagonal part of the tensor.
--
-- This operation returns a tensor with the <tt>diagonal</tt> part of the
-- <tt>input</tt>. The <tt>diagonal</tt> part is computed as follows:
--
-- Assume <tt>input</tt> has dimensions `[D1,..., Dk, D1,..., Dk]`, then
-- the output is a tensor of rank <tt>k</tt> with dimensions `[D1,...,
-- Dk]` where:
--
-- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
--
-- For example:
--
-- ```prettyprint # <tt>input</tt> is [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0,
-- 3, 0] [0, 0, 0, 4]]
--
-- tf.diag_part(input) ==&gt; [1, 2, 3, 4] ```
diagPart :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t
diagPart' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes Psi, the derivative of Lgamma (the log of the absolute value
-- of
--
-- `Gamma(x)`), element-wise.
digamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
digamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the grayscale dilation of 4-D <tt>input</tt> and 3-D
-- <a>filter</a> tensors.
--
-- The <tt>input</tt> tensor has shape `[batch, in_height, in_width,
-- depth]` and the <a>filter</a> tensor has shape `[filter_height,
-- filter_width, depth]`, i.e., each input channel is processed
-- independently of the others with its own structuring function. The
-- <tt>output</tt> tensor has shape `[batch, out_height, out_width,
-- depth]`. The spatial dimensions of the output tensor depend on the
-- <tt>padding</tt> algorithm. We currently only support the default
-- <a>NHWC</a> <tt>data_format</tt>.
--
-- In detail, the grayscale morphological 2-D dilation is the max-sum
-- correlation (for consistency with <tt>conv2d</tt>, we use unmirrored
-- filters):
--
-- output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] *
-- dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c]
--
-- Max-pooling is a special case when the filter has size equal to the
-- pooling kernel size and contains all zeros.
--
-- Note on duality: The dilation of <tt>input</tt> by the <a>filter</a>
-- is equal to the negation of the erosion of `-input` by the reflected
-- <a>filter</a>.
dilation2D :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
dilation2D' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the gradient of morphological 2-D dilation with respect to
-- the filter.
dilation2DBackpropFilter :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
dilation2DBackpropFilter' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Computes the gradient of morphological 2-D dilation with respect to
-- the input.
dilation2DBackpropInput :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
dilation2DBackpropInput' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Returns x / y element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Div</tt> supports broadcasting. More about broadcasting
-- <a>here</a></li>
-- </ul>
div :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
div' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Draw bounding boxes on a batch of images.
--
-- Outputs a copy of <tt>images</tt> but draws on top of the pixels zero
-- or more bounding boxes specified by the locations in <tt>boxes</tt>.
-- The coordinates of the each bounding box in <tt>boxes</tt> are encoded
-- as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are
-- floats in `[0.0, 1.0]` relative to the width and height of the
-- underlying image.
--
-- For example, if an image is 100 x 200 pixels and the bounding box is
-- `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
-- the bounding box will be `(10, 40)` to `(50, 180)`.
--
-- Parts of the bounding box may fall outside the image.
drawBoundingBoxes :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t
drawBoundingBoxes' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t
-- | Partitions `data` into <tt>num_partitions</tt> tensors using indices
-- from <tt>partitions</tt>.
--
-- For each index tuple <tt>js</tt> of size `partitions.ndim`, the slice
-- `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices
-- with `partitions[js] = i` are placed in `outputs[i]` in lexicographic
-- order of <tt>js</tt>, and the first dimension of `outputs[i]` is the
-- number of entries in <tt>partitions</tt> equal to <tt>i</tt>. In
-- detail,
--
-- ```python outputs[i].shape = [sum(partitions == i)] +
-- data.shape[partitions.ndim:]
--
-- outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) ```
--
-- `data.shape` must start with `partitions.shape`.
--
-- For example:
--
-- ```python # Scalar partitions. partitions = 1 num_partitions = 2 data
-- = [10, 20] outputs[0] = [] # Empty with shape [0, 2] outputs[1] =
-- [[10, 20]]
--
-- # Vector partitions. partitions = [0, 0, 1, 1, 0] num_partitions = 2
-- data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] =
-- [30, 40] ```
--
-- (Image: ../../images/DynamicPartition.png)
dynamicPartition :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t]
dynamicPartition' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t]
-- | Interleave the values from the `data` tensors into a single tensor.
--
-- Builds a merged tensor such that
--
-- ```python merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
-- ```
--
-- For example, if each `indices[m]` is scalar or vector, we have
--
-- ```python # Scalar indices: merged[indices[m], ...] = data[m][...]
--
-- # Vector indices: merged[indices[m][i], ...] = data[m][i, ...] ```
--
-- Each `data[i].shape` must start with the corresponding
-- `indices[i].shape`, and the rest of `data[i].shape` must be constant
-- w.r.t. <tt>i</tt>. That is, we must have `data[i].shape =
-- indices[i].shape + constant`. In terms of this <tt>constant</tt>, the
-- output shape is
--
-- merged.shape = [max(indices)] + constant
--
-- Values are merged in order, so if an index appears in both
-- `indices[m][i]` and `indices[n][j]` for `(m,i) &lt; (n,j)` the slice
-- `data[n][j]` will appear in the merged result.
--
-- For example:
--
-- ```python indices[0] = 6 indices[1] = [4, 1] indices[2] = [[5, 2], [0,
-- 3]] data[0] = [61, 62] data[1] = [[41, 42], [11, 12]] data[2] = [[[51,
-- 52], [21, 22]], [[1, 2], [31, 32]]] merged = [[1, 2], [11, 12], [21,
-- 22], [31, 32], [41, 42], [51, 52], [61, 62]] ```
--
-- (Image: ../../images/DynamicStitch.png)
dynamicStitch :: (TensorType t) => [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t
dynamicStitch' :: (TensorType t) => OpParams -> [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t
-- | Computes the (possibly normalized) Levenshtein Edit Distance.
--
-- The inputs are variable-length sequences provided by SparseTensors
-- (hypothesis_indices, hypothesis_values, hypothesis_shape) and
-- (truth_indices, truth_values, truth_shape).
--
-- The inputs are:
editDistance :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float
editDistance' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float
-- | Computes exponential linear: `exp(features) - 1` if &lt; 0,
-- <tt>features</tt> otherwise.
--
-- See <a>Fast and Accurate Deep Network Learning by Exponential Linear
-- Units (ELUs)</a>
elu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
elu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes gradients for the exponential linear (Elu) operation.
eluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
eluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Encode strings into web-safe base64 format.
--
-- Refer to the following article for more information on the base64
-- format: en.wikipedia.org/wiki/Base64. Base64 strings may have padding
-- with '=' at the end so that the encoded data has a length that is a
-- multiple of 4. See the Padding section of the link above.
--
-- Web-safe means that the encoder uses - and _ instead of + and /.
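--
-- A runnable round-trip sketch together with <tt>decodeBase64</tt>
-- (illustrative; assumes <tt>vector</tt> from <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- {-# LANGUAGE OverloadedStrings #-}
-- import Data.ByteString (ByteString)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- Encoding then decoding should give back the original strings.
--     result <- TF.runSession $ TF.run $
--         TFC.decodeBase64 (TFC.encodeBase64 (TF.vector ["hello" :: ByteString]))
--     print (result :: V.Vector ByteString)
-- ```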
encodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString
encodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
-- | JPEG-encode an image.
--
-- <tt>image</tt> is a 3-D uint8 Tensor of shape `[height, width,
-- channels]`.
--
-- The attr <tt>format</tt> can be used to override the color format of
-- the encoded output. Values can be:
--
-- <ul>
-- <li>`''`: Use a default format based on the number of channels in the
-- image.</li>
-- <li><tt>grayscale</tt>: Output a grayscale JPEG image. The
-- <tt>channels</tt> dimension of <tt>image</tt> must be 1.</li>
-- <li><tt>rgb</tt>: Output an RGB JPEG image. The <tt>channels</tt>
-- dimension of <tt>image</tt> must be 3.</li>
-- </ul>
--
-- If <tt>format</tt> is not specified or is the empty string, a default
-- format is picked as a function of the number of channels in
-- <tt>image</tt>:
--
-- <ul>
-- <li>1: Output a grayscale image.</li>
-- <li>3: Output an RGB image.</li>
-- </ul>
encodeJpeg :: Tensor v'1 Word8 -> Tensor Build ByteString
encodeJpeg' :: OpParams -> Tensor v'1 Word8 -> Tensor Build ByteString
-- | PNG-encode an image.
--
-- <tt>image</tt> is a 3-D uint8 or uint16 Tensor of shape `[height,
-- width, channels]` where <tt>channels</tt> is:
--
-- <ul>
-- <li>1: for grayscale.</li>
-- <li>2: for grayscale + alpha.</li>
-- <li>3: for RGB.</li>
-- <li>4: for RGBA.</li>
-- </ul>
--
-- The ZLIB compression level, <tt>compression</tt>, can be -1 for the
-- PNG-encoder default or a value from 0 to 9. 9 is the highest
-- compression level, generating the smallest output, but is slower.
encodePng :: (OneOf '[Word16, Word8] t) => Tensor v'1 t -> Tensor Build ByteString
encodePng' :: (OneOf '[Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString
-- | Creates or finds a child frame, and makes `data` available to the
-- child frame.
--
-- This op is used together with <tt>Exit</tt> to create loops in the
-- graph. The unique <tt>frame_name</tt> is used by the <tt>Executor</tt>
-- to identify frames. If <tt>is_constant</tt> is true, <tt>output</tt>
-- is a constant in the child frame; otherwise it may be changed in the
-- child frame. At most <tt>parallel_iterations</tt> iterations are run
-- in parallel in the child frame.
enter :: (TensorType t) => Tensor v'1 t -> Tensor Build t
enter' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns the truth value of (x == y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Equal</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
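--
-- A runnable sketch (illustrative; assumes <tt>vector</tt> from
-- <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- Element-wise comparison; expected value [True, False, True].
--     result <- TF.runSession $ TF.run $
--         TFC.equal (TF.vector [1, 2, 3 :: Int32]) (TF.vector [1, 5, 3])
--     print (result :: V.Vector Bool)
-- ```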
equal :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
equal' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
-- | Computes the Gauss error function of <tt>x</tt> element-wise.
erf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
erf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the complementary error function of <tt>x</tt> element-wise.
erfc :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
erfc' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Exits the current frame to its parent frame.
--
-- Exit makes its input `data` available to the parent frame.
exit :: (TensorType t) => Tensor v'1 t -> Tensor Build t
exit' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes exponential of x element-wise. \(y = e^x\).
exp :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
exp' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Inserts a dimension of 1 into a tensor's shape.
--
-- Given a tensor <tt>input</tt>, this operation inserts a dimension of 1
-- at the dimension index <tt>dim</tt> of <tt>input</tt>'s shape. The
-- dimension index <tt>dim</tt> starts at zero; if you specify a negative
-- number for <tt>dim</tt> it is counted backward from the end.
--
-- This operation is useful if you want to add a batch dimension to a
-- single element. For example, if you have a single image of shape
-- `[height, width, channels]`, you can make it a batch of 1 image with
-- `expand_dims(image, 0)`, which will make the shape `[1, height, width,
-- channels]`.
--
-- Other examples:
--
-- ```prettyprint # <tt>t</tt> is a tensor of shape [2]
-- shape(expand_dims(t, 0)) ==&gt; [1, 2] shape(expand_dims(t, 1)) ==&gt;
-- [2, 1] shape(expand_dims(t, -1)) ==&gt; [2, 1]
--
-- # <tt>t2</tt> is a tensor of shape [2, 3, 5] shape(expand_dims(t2, 0))
-- ==&gt; [1, 2, 3, 5] shape(expand_dims(t2, 2)) ==&gt; [2, 3, 1, 5]
-- shape(expand_dims(t2, 3)) ==&gt; [2, 3, 5, 1] ```
--
-- This operation requires that:
--
-- `-1-input.dims() &lt;= dim &lt;= input.dims()`
--
-- This operation is related to `squeeze()`, which removes dimensions of
-- size 1.
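--
-- A runnable sketch of the first example above, checking the result shape
-- with the <tt>shape</tt> op from this module (illustrative; assumes
-- <tt>vector</tt> and <tt>scalar</tt> from <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (scalar, vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- Insert a leading dimension: shape [2] becomes [1, 2].
--     result <- TF.runSession $ TF.run $
--         TFC.shape (TFC.expandDims (TF.vector [1, 2 :: Float]) (TF.scalar (0 :: Int32)))
--     print (result :: V.Vector Int32)
-- ```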
expandDims :: (TensorType t, OneOf '[Int32, Int64] tdim) => Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t
expandDims' :: (TensorType t, OneOf '[Int32, Int64] tdim) => OpParams -> Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t
-- | Computes exponential of x - 1 element-wise.
--
-- I.e., \(y = (exp x) - 1\).
expm1 :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
expm1' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Extracts a glimpse from the input tensor.
--
-- Returns a set of windows called glimpses extracted at location
-- <tt>offsets</tt> from the input tensor. If a window only partially
-- overlaps the input, the non-overlapping areas will be filled with
-- random noise.
--
-- The result is a 4-D tensor of shape `[batch_size, glimpse_height,
-- glimpse_width, channels]`. The channels and batch dimensions are the
-- same as that of the input tensor. The height and width of the output
-- windows are specified in the <a>size</a> parameter.
--
-- The arguments <tt>normalized</tt> and <tt>centered</tt> control how
-- the windows are built:
--
-- <ul>
-- <li>If the coordinates are normalized but not centered, 0.0 and 1.0
-- correspond to the minimum and maximum of each height and width
-- dimension.</li>
-- <li>If the coordinates are both normalized and centered, they range
-- from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
-- left corner, the lower right corner is located at (1.0, 1.0) and the
-- center is at (0, 0).</li>
-- <li>If the coordinates are not normalized they are interpreted as
-- numbers of pixels.</li>
-- </ul>
extractGlimpse :: Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float
extractGlimpse' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float
-- | Extract <tt>patches</tt> from <tt>images</tt> and put them in the
-- "depth" output dimension.
extractImagePatches :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
extractImagePatches' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Compute the 1-dimensional discrete Fourier Transform over the
-- inner-most
--
-- dimension of <tt>input</tt>.
fFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
fFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
-- | Compute the 2-dimensional discrete Fourier Transform over the
-- inner-most
--
-- 2 dimensions of <tt>input</tt>.
fFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
fFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
-- | Compute the 3-dimensional discrete Fourier Transform over the
-- inner-most 3
--
-- dimensions of <tt>input</tt>.
fFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
fFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
-- | A queue that produces elements in first-in first-out order.
fIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString)
fIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
-- | A queue that produces elements in first-in first-out order.
fIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle)
fIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle)
-- | Output a fact about factorials.
fact :: Tensor Build ByteString
fact' :: OpParams -> Tensor Build ByteString
-- | Fake-quantize the <tt>inputs</tt> tensor, type float to
-- <tt>outputs</tt> tensor of same type.
--
-- Attributes [min; max] define the clamping range for the
-- <tt>inputs</tt> data. Op divides this range into 255 steps (total of
-- 256 values), then replaces each <tt>inputs</tt> value with the closest
-- of the quantized step values.
--
-- Quantization is called fake since the output is still in floating
-- point.
fakeQuantWithMinMaxArgs :: Tensor v'1 Float -> Tensor Build Float
fakeQuantWithMinMaxArgs' :: OpParams -> Tensor v'1 Float -> Tensor Build Float
-- | Compute gradients for a FakeQuantWithMinMaxArgs operation.
fakeQuantWithMinMaxArgsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
fakeQuantWithMinMaxArgsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
-- | Fake-quantize the <tt>inputs</tt> tensor of type float and shape `[b,
-- h, w, d]` via
--
-- global float scalars <a>min</a> and <a>max</a> to <tt>outputs</tt>
-- tensor of same shape as <tt>inputs</tt>.
--
-- <ul>
-- <li><i>min; max</i> is the clamping range for the <tt>inputs</tt>
-- data. Op divides this range into 255 steps (total of 256 values), then
-- replaces each <tt>inputs</tt> value with the closest of the quantized
-- step values.</li>
-- </ul>
--
-- This operation has a gradient and thus allows for training <a>min</a>
-- and <a>max</a> values.
fakeQuantWithMinMaxVars :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
fakeQuantWithMinMaxVars' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
-- | Compute gradients for a FakeQuantWithMinMaxVars operation.
fakeQuantWithMinMaxVarsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
fakeQuantWithMinMaxVarsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
-- | Fake-quantize the <tt>inputs</tt> tensor of type float and one of the
-- shapes: `[d]`,
--
-- `[b, d]` `[b, h, w, d]` via per-channel floats <a>min</a> and
-- <a>max</a> of shape `[d]` to <tt>outputs</tt> tensor of same shape as
-- <tt>inputs</tt>.
--
-- <ul>
-- <li><i>min; max</i> is the clamping range for the <tt>inputs</tt> data
-- in the corresponding depth channel. Op divides this range into 255
-- steps (total of 256 values), then replaces each <tt>inputs</tt> value
-- with the closest of the quantized step values.</li>
-- </ul>
--
-- This operation has a gradient and thus allows for training <a>min</a>
-- and <a>max</a> values.
fakeQuantWithMinMaxVarsPerChannel :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
fakeQuantWithMinMaxVarsPerChannel' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
-- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
-- | Deprecated. Do not use.
fakeQueue :: (MonadBuild m') => ResourceHandle -> m' (Tensor Ref ByteString)
fakeQueue' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Ref ByteString)
-- | Creates a tensor filled with a scalar value.
--
-- This operation creates a tensor of shape <tt>dims</tt> and fills it
-- with <a>value</a>.
--
-- For example:
--
-- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9)
-- ==&gt; [[9, 9, 9] [9, 9, 9]] ```
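--
-- A runnable sketch of the example above (illustrative; assumes
-- <tt>vector</tt> and <tt>scalar</tt> from <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (scalar, vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- A [2, 3] tensor filled with 9, fetched as a flat vector of six 9s.
--     result <- TF.runSession $ TF.run $
--         TFC.fill (TF.vector [2, 3 :: Int32]) (TF.scalar (9 :: Float))
--     print (result :: V.Vector Float)
-- ```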
fill :: (TensorType t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
fill' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
-- | A Reader that outputs fixed-length records from a file.
fixedLengthRecordReader :: (MonadBuild m') => Int64 -> m' (Tensor Ref ByteString)
fixedLengthRecordReader' :: (MonadBuild m') => OpParams -> Int64 -> m' (Tensor Ref ByteString)
-- | A Reader that outputs fixed-length records from a file.
fixedLengthRecordReaderV2 :: (MonadBuild m') => Int64 -> m' (ResourceHandle)
fixedLengthRecordReaderV2' :: (MonadBuild m') => OpParams -> Int64 -> m' (ResourceHandle)
-- | Generates labels for candidate sampling with a learned unigram
-- distribution.
--
-- A unigram sampler could use a fixed unigram distribution read from a
-- file or passed in as an in-memory array instead of building up the
-- distribution from data on the fly. There is also an option to skew the
-- distribution by applying a distortion power to the weights.
--
-- The vocabulary file should be in CSV-like format, with the last field
-- being the weight associated with the word.
--
-- For each batch, this op picks a single set of sampled candidate
-- labels.
--
-- The advantages of sampling candidates per-batch are simplicity and the
-- possibility of efficient dense matrix multiplication. The disadvantage
-- is that the sampled candidates must be chosen independently of the
-- context and of the true labels.
fixedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
fixedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
-- | Returns element-wise largest integer not greater than x.
floor :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
floor' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns x // y element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>FloorDiv</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
floorDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
floorDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns element-wise remainder of division. When `x &lt; 0` xor `y
-- &lt; 0` is
--
-- true, this follows Python semantics in that the result here is
-- consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y)
-- = x`.
--
-- <ul>
-- <li>NOTE*: <tt>FloorMod</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
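--
-- A runnable sketch of the Python-style semantics (illustrative; assumes
-- <tt>vector</tt> from <tt>TensorFlow.Ops</tt>):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.Ops as TF (vector)
-- import qualified TensorFlow.GenOps.Core as TFC
--
-- main :: IO ()
-- main = do
--     -- Expected value [1, 2]: like Haskell's `mod`, (-7) `mod` 3 == 2.
--     result <- TF.runSession $ TF.run $
--         TFC.floorMod (TF.vector [7, -7 :: Int32]) (TF.vector [3, 3])
--     print (result :: V.Vector Int32)
-- ```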
floorMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
floorMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Performs fractional average pooling on the input.
--
-- Fractional average pooling is similar to Fractional max pooling in the
-- pooling region generation step. The only difference is that after
-- pooling regions are generated, a mean operation is performed instead
-- of a max operation in each pooling region.
fractionalAvgPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
fractionalAvgPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
-- | Computes gradient of the FractionalAvgPool function.
--
-- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
-- FractionalAvgPoolGrad, we just need to evenly back-propagate each
-- element of out_backprop to those indices that form the same pooling
-- cell. Therefore, we just need to know the shape of original input
-- tensor, instead of the whole tensor.
fractionalAvgPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t
fractionalAvgPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t
-- | Performs fractional max pooling on the input.
--
-- Fractional max pooling is slightly different than regular max pooling.
-- In regular max pooling, you downsize an input set by taking the
-- maximum value of smaller N x N subsections of the set (often 2x2), and
-- try to reduce the set by a factor of N, where N is an integer.
-- Fractional max pooling, as you might expect from the word
-- "fractional", means that the overall reduction ratio N does not have
-- to be an integer.
--
-- The sizes of the pooling regions are generated randomly but are fairly
-- uniform. For example, let's look at the height dimension, and the
-- constraints on the list of rows that will be pool boundaries.
--
-- First we define the following:
--
-- <ol>
-- <li>input_row_length : the number of rows from the input set</li>
-- <li>output_row_length : which will be smaller than the input</li>
-- <li>alpha = input_row_length / output_row_length : our reduction
-- ratio</li>
-- <li>K = floor(alpha)</li>
-- <li>row_pooling_sequence : this is the result list of pool boundary
-- rows</li>
-- </ol>
--
-- Then, row_pooling_sequence should satisfy:
--
-- <ol>
-- <li>a[0] = 0 : the first value of the sequence is 0</li>
-- <li>a[end] = input_row_length : the last value of the sequence is the
-- size</li>
-- <li>K &lt;= (a[i+1] - a[i]) &lt;= K+1 : all intervals have size K or
-- K+1</li>
-- <li>length(row_pooling_sequence) = output_row_length+1</li>
-- </ol>
--
-- For more details on fractional max pooling, see this paper:
-- <a>Benjamin Graham, Fractional Max-Pooling</a>
fractionalMaxPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
fractionalMaxPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
-- | Computes gradient of the FractionalMaxPool function.
fractionalMaxPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t
fractionalMaxPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t
-- | Batch normalization.
--
-- Note that the sizes of the 4D Tensors are defined by either <a>NHWC</a>
-- or <a>NCHW</a>. The sizes of the 1D Tensors match the dimension C of the
-- 4D Tensors.
fusedBatchNorm :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
fusedBatchNorm' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
-- | Gradient for batch normalization.
--
-- Note that the sizes of the 4D Tensors are defined by either <a>NHWC</a>
-- or <a>NCHW</a>. The sizes of the 1D Tensors match the dimension C of the
-- 4D Tensors.
fusedBatchNormGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
fusedBatchNormGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
-- | Performs a padding as a preprocess during a convolution.
--
-- Similar to FusedResizeAndPadConv2d, this op allows for an optimized
-- implementation where the spatial padding transformation stage is fused
-- with the im2col lookup, but in this case without the bilinear
-- filtering required for resizing. Fusing the padding prevents the need
-- to write out the intermediate results as whole tensors, reducing
-- memory pressure, and we can get some latency gains by merging the
-- transformation calculations. The data_format attribute for Conv2D
-- isn't supported by this op, and <tt>NHWC</tt> order is used instead.
-- Internally this op uses a single per-graph scratch buffer, which means
-- that it will block if multiple versions are being run in parallel.
-- This is because this operator is primarily an optimization to minimize
-- memory usage.
fusedPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
fusedPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
-- | Performs a resize and padding as a preprocess during a convolution.
--
-- It's often possible to do spatial transformations more efficiently as
-- part of the packing stage of a convolution, so this op allows for an
-- optimized implementation where these stages are fused together. This
-- prevents the need to write out the intermediate results as whole
-- tensors, reducing memory pressure, and we can get some latency gains
-- by merging the transformation calculations. The data_format attribute
-- for Conv2D isn't supported by this op, and defaults to <tt>NHWC</tt>
-- order. Internally this op uses a single per-graph scratch buffer,
-- which means that it will block if multiple versions are being run in
-- parallel. This is because this operator is primarily an optimization
-- to minimize memory usage.
fusedResizeAndPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t
fusedResizeAndPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t
-- | Gather slices from <tt>params</tt> according to <tt>indices</tt>.
--
-- <tt>indices</tt> must be an integer tensor of any dimension (usually
-- 0-D or 1-D). Produces an output tensor with shape `indices.shape +
-- params.shape[1:]` where:
--
-- ```python # Scalar indices output[:, ..., :] = params[indices, :, ...
-- :]
--
-- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :]
--
-- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i,
-- ..., j], :, ..., :] ```
--
-- If <tt>indices</tt> is a permutation and `len(indices) ==
-- params.shape[0]` then this operation will permute <tt>params</tt>
-- accordingly.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/Gather.png" alt</a> <a>/div</a>
gather :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
gather' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
-- | Gather values or slices from <tt>params</tt> according to
-- <tt>indices</tt>.
--
-- <tt>params</tt> is a Tensor of rank <tt>P</tt> and <tt>indices</tt> is
-- a Tensor of rank <tt>Q</tt>.
--
-- <tt>indices</tt> must be integer tensor, containing indices into
-- <tt>params</tt>. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0
-- &lt; K &lt;= P`.
--
-- The innermost dimension of <tt>indices</tt> (with length <tt>K</tt>)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- &lt; P`) along the <tt>K</tt>th dimension of <tt>params</tt>.
--
-- Produces an output tensor with shape
--
-- ``` [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. ```
--
-- Some examples below.
--
-- Simple indexing into a matrix:
--
-- ```python indices = [[0, 0], [1, 1]] params = [[<tt>a</tt>,
-- <tt>b</tt>], [<tt>c</tt>, <tt>d</tt>]] output = [<tt>a</tt>,
-- <tt>d</tt>] ```
--
-- Slice indexing into a matrix:
--
-- ```python indices = [[1], [0]] params = [[<tt>a</tt>, <tt>b</tt>],
-- [<tt>c</tt>, <tt>d</tt>]] output = [[<tt>c</tt>, <tt>d</tt>],
-- [<tt>a</tt>, <tt>b</tt>]] ```
--
-- Indexing into a 3-tensor:
--
-- ```python indices = [[1]] params = [[[<tt>a0</tt>, <tt>b0</tt>],
-- [<tt>c0</tt>, <tt>d0</tt>]], [[<tt>a1</tt>, <tt>b1</tt>],
-- [<tt>c1</tt>, <tt>d1</tt>]]] output = [[[<tt>a1</tt>, <tt>b1</tt>],
-- [<tt>c1</tt>, <tt>d1</tt>]]]
--
-- indices = [[0, 1], [1, 0]] params = [[[<tt>a0</tt>, <tt>b0</tt>],
-- [<tt>c0</tt>, <tt>d0</tt>]], [[<tt>a1</tt>, <tt>b1</tt>],
-- [<tt>c1</tt>, <tt>d1</tt>]]] output = [[<tt>c0</tt>, <tt>d0</tt>],
-- [<tt>a1</tt>, <tt>b1</tt>]]
--
-- indices = [[0, 0, 1], [1, 0, 1]] params = [[[<tt>a0</tt>,
-- <tt>b0</tt>], [<tt>c0</tt>, <tt>d0</tt>]], [[<tt>a1</tt>,
-- <tt>b1</tt>], [<tt>c1</tt>, <tt>d1</tt>]]] output = [<tt>b0</tt>,
-- <tt>b1</tt>] ```
--
-- Batched indexing into a matrix:
--
-- ```python indices = [[[0, 0]], [[0, 1]]] params = [[<tt>a</tt>,
-- <tt>b</tt>], [<tt>c</tt>, <tt>d</tt>]] output = [[<tt>a</tt>],
-- [<tt>b</tt>]] ```
--
-- Batched slice indexing into a matrix:
--
-- ```python indices = [[[1]], [[0]]] params = [[<tt>a</tt>, <tt>b</tt>],
-- [<tt>c</tt>, <tt>d</tt>]] output = [[[<tt>c</tt>, <tt>d</tt>]],
-- [[<tt>a</tt>, <tt>b</tt>]]] ```
--
-- Batched indexing into a 3-tensor:
--
-- ```python indices = [[[1]], [[0]]] params = [[[<tt>a0</tt>,
-- <tt>b0</tt>], [<tt>c0</tt>, <tt>d0</tt>]], [[<tt>a1</tt>,
-- <tt>b1</tt>], [<tt>c1</tt>, <tt>d1</tt>]]] output = [[[[<tt>a1</tt>,
-- <tt>b1</tt>], [<tt>c1</tt>, <tt>d1</tt>]]], [[[<tt>a0</tt>,
-- <tt>b0</tt>], [<tt>c0</tt>, <tt>d0</tt>]]]]
--
-- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params =
-- [[[<tt>a0</tt>, <tt>b0</tt>], [<tt>c0</tt>, <tt>d0</tt>]],
-- [[<tt>a1</tt>, <tt>b1</tt>], [<tt>c1</tt>, <tt>d1</tt>]]] output =
-- [[[<tt>c0</tt>, <tt>d0</tt>], [<tt>a1</tt>, <tt>b1</tt>]],
-- [[<tt>a0</tt>, <tt>b0</tt>], [<tt>c1</tt>, <tt>d1</tt>]]]
--
-- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params =
-- [[[<tt>a0</tt>, <tt>b0</tt>], [<tt>c0</tt>, <tt>d0</tt>]],
-- [[<tt>a1</tt>, <tt>b1</tt>], [<tt>c1</tt>, <tt>d1</tt>]]] output =
-- [[<tt>b0</tt>, <tt>b1</tt>], [<tt>d0</tt>, <tt>c1</tt>]] ```
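--
-- A minimal sketch of the "simple indexing into a matrix" case above
-- (same companion-package assumptions as the <tt>gather</tt> example
-- earlier; the function name is illustrative):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- gatherNdExample :: IO (V.Vector Float)
-- gatherNdExample = TF.runSession $ do
--     let params  = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--         -- indices [[0, 0], [1, 1]] picks params[0, 0] and params[1, 1].
--         indices = TF.constant (TF.Shape [2, 2]) [0, 0, 1, 1 :: Int32]
--     TF.run (CoreOps.gatherNd params indices)  -- ==> [1.0, 4.0]
-- ```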
gatherNd :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
gatherNd' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
-- | Store the input tensor in the state of the current session.
getSessionHandle :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString
getSessionHandle' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString
-- | Get the value of the tensor specified by its handle.
getSessionTensor :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor Build dtype
getSessionTensor' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype
-- | Returns the truth value of (x &gt; y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Greater</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
greater :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
greater' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
-- | Returns the truth value of (x &gt;= y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>GreaterEqual</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
greaterEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
greaterEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
-- | Convert one or more images from HSV to RGB.
--
-- Outputs a tensor of the same shape as the <tt>images</tt> tensor,
-- containing the RGB value of the pixels. The output is only well
-- defined if the values in <tt>images</tt> are in `[0,1]`.
--
-- See <tt>rgb_to_hsv</tt> for a description of the HSV encoding.
hSVToRGB :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
hSVToRGB' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Creates a non-initialized hash table.
--
-- This op creates a hash table, specifying the type of its keys and
-- values. Before using the table you will have to initialize it. After
-- initialization the table will be immutable.
hashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString)
hashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString)
-- | Outputs a <tt>Summary</tt> protocol buffer with a histogram.
--
-- The generated <a>`Summary`</a> has one summary value containing a
-- histogram for <tt>values</tt>.
--
-- This op reports an <tt>InvalidArgument</tt> error if any value is not
-- finite.
histogramSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
histogramSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
-- | Compute the inverse 1-dimensional discrete Fourier Transform over the
-- inner-most
--
-- dimension of <tt>input</tt>.
iFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
iFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
-- | Compute the inverse 2-dimensional discrete Fourier Transform over the
-- inner-most
--
-- 2 dimensions of <tt>input</tt>.
iFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
iFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
-- | Compute the inverse 3-dimensional discrete Fourier Transform over the
-- inner-most
--
-- 3 dimensions of <tt>input</tt>.
iFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
iFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
-- | Return a tensor with the same shape and contents as the input tensor
-- or value.
identity :: (TensorType t) => Tensor v'1 t -> Tensor Build t
identity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | A Reader that outputs the queued work as both the key and value.
--
-- To use, enqueue strings in a Queue. ReaderRead will take the front
-- work string and output (work, work).
identityReader :: (MonadBuild m') => m' (Tensor Ref ByteString)
identityReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString)
-- | A Reader that outputs the queued work as both the key and value.
--
-- To use, enqueue strings in a Queue. ReaderRead will take the front
-- work string and output (work, work).
identityReaderV2 :: (MonadBuild m') => m' (ResourceHandle)
identityReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle)
-- | Compute the lower regularized incomplete Gamma function `P(a, x)`.
--
-- The lower regularized incomplete Gamma function is defined as:
--
-- ``` P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) ``` where ```
-- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt ``` is the lower
-- incomplete Gamma function.
--
-- Note, above `Q(a, x)` (<tt>Igammac</tt>) is the upper regularized
-- incomplete Gamma function.
igamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
igamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Compute the upper regularized incomplete Gamma function `Q(a, x)`.
--
-- The upper regularized incomplete Gamma function is defined as:
--
-- ``` Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) ``` where ```
-- Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt ``` is the upper
-- incomplete Gamma function.
--
-- Note, above `P(a, x)` (<tt>Igamma</tt>) is the lower regularized
-- incomplete Gamma function.
igammac :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
igammac' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns the imaginary part of a complex number.
--
-- Given a tensor <tt>input</tt> of complex numbers, this operation
-- returns a tensor of type <tt>float</tt> that is the imaginary part of
-- each element in <tt>input</tt>. All elements in <tt>input</tt> must be
-- complex numbers of the form \(a + bj\), where *a* is the real part and
-- *b* is the imaginary part returned by this operation.
--
-- For example:
--
-- ``` # tensor <tt>input</tt> is [-2.25 + 4.75j, 3.25 + 5.75j]
-- tf.imag(input) ==&gt; [4.75, 5.75] ```
imag :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout
imag' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout
-- | Outputs a <tt>Summary</tt> protocol buffer with images.
--
-- The summary has up to <tt>max_images</tt> summary values containing
-- images. The images are built from <tt>tensor</tt> which must be 4-D
-- with shape `[batch_size, height, width, channels]` and where
-- <tt>channels</tt> can be:
--
-- <ul>
-- <li>1: <tt>tensor</tt> is interpreted as Grayscale.</li>
-- <li>3: <tt>tensor</tt> is interpreted as RGB.</li>
-- <li>4: <tt>tensor</tt> is interpreted as RGBA.</li>
-- </ul>
--
-- The images have the same number of channels as the input tensor. For
-- float input, the values are normalized one image at a time to fit in
-- the range `[0, 255]`. <tt>uint8</tt> values are unchanged. The op uses
-- two different normalization algorithms:
--
-- <ul>
-- <li>If the input values are all positive, they are rescaled so the
-- largest one is 255.</li>
-- <li>If any input value is negative, the values are shifted so input
-- value 0.0 is at 127. They are then rescaled so that either the
-- smallest value is 0, or the largest one is 255.</li>
-- </ul>
--
-- The <tt>tag</tt> argument is a scalar <a>Tensor</a> of type
-- <tt>string</tt>. It is used to build the <tt>tag</tt> of the summary
-- values:
--
-- <ul>
-- <li>If <tt>max_images</tt> is 1, the summary value tag is
-- '*tag*/image'.</li>
-- <li>If <tt>max_images</tt> is greater than 1, the summary value tags
-- are generated sequentially as '*tag*/image/0', '*tag*/image/1',
-- etc.</li>
-- </ul>
--
-- The <tt>bad_color</tt> argument is the color to use in the generated
-- images for non-finite input values. It is a <tt>uint8</tt> 1-D tensor
-- of length <tt>channels</tt>. Each element must be in the range `[0,
-- 255]` (It represents the value of a pixel in the output image).
-- Non-finite values in the input tensor are replaced by this tensor in
-- the output image. The default value is the color red.
imageSummary :: (OneOf '[Word16, Word8, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
imageSummary' :: (OneOf '[Word16, Word8, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
-- | Returns immutable tensor from memory region.
--
-- The current implementation memmaps the tensor from a file.
immutableConst :: (TensorType dtype) => Shape -> Tensor Build dtype
immutableConst' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype
-- | Says whether the targets are in the top <tt>K</tt> predictions.
--
-- This outputs a <tt>batch_size</tt> bool array, an entry `out[i]` is
-- <tt>true</tt> if the prediction for the target class is among the top
-- <tt>k</tt> predictions among all predictions for example <tt>i</tt>.
-- Note that the behavior of <tt>InTopK</tt> differs from the
-- <tt>TopK</tt> op in its handling of ties; if multiple classes have the
-- same prediction value and straddle the top-<tt>k</tt> boundary, all of
-- those classes are considered to be in the top <tt>k</tt>.
--
-- More formally, let
--
-- \(predictions_i\) be the predictions for all classes for example
-- <tt>i</tt>, \(targets_i\) be the target class for example <tt>i</tt>,
-- \(out_i\) be the output for example <tt>i</tt>,
--
-- $$out_i = predictions_{i, targets_i} \in
-- TopKIncludingTies(predictions_i)$$
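--
-- A minimal sketch (assuming <tt>constant</tt> and <tt>runSession</tt>
-- from the companion <tt>tensorflow-ops</tt> and <tt>tensorflow</tt>
-- packages; <tt>inTopKExample</tt> is an illustrative name):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- inTopKExample :: IO (V.Vector Bool)
-- inTopKExample = TF.runSession $ do
--     let predictions = TF.constant (TF.Shape [2, 3])
--                           [0.1, 0.8, 0.1, 0.6, 0.3, 0.1 :: Float]
--         targets     = TF.constant (TF.Shape [2]) [1, 2 :: Int32]
--     -- k = 1: row 0's top prediction is class 1, row 1's is class 0.
--     TF.run (CoreOps.inTopK 1 predictions targets)  -- ==> [True, False]
-- ```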
inTopK :: (OneOf '[Int32, Int64] t) => Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool
inTopK' :: (OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool
-- | Table initializer that takes two tensors for keys and values
-- respectively.
initializeTable :: (MonadBuild m', TensorType tkey, TensorType tval) => Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode)
initializeTable' :: (MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode)
-- | Initializes a table from a text file.
--
-- It inserts one key-value pair into the table for each line of the
-- file. The key and value is extracted from the whole line content,
-- elements from the split line based on <tt>delimiter</tt> or the line
-- number (starting from zero). Where to extract the key and value from a
-- line is specified by <tt>key_index</tt> and <tt>value_index</tt>.
--
-- <ul>
-- <li>A value of -1 means use the line number (starting from zero),
-- expects <tt>int64</tt>.</li>
-- <li>A value of -2 means use the whole line content, expects
-- <tt>string</tt>.</li>
-- <li>A value &gt;= 0 means use the index (starting at zero) of the
-- split line based on <tt>delimiter</tt>.</li>
-- </ul>
initializeTableFromTextFile :: (MonadBuild m') => Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
initializeTableFromTextFile' :: (MonadBuild m') => OpParams -> Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
-- | Computes the reciprocal of x element-wise.
--
-- I.e., \(y = 1 / x\).
inv :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
inv' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient for the inverse of <tt>x</tt> wrt its input.
--
-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and <tt>dy</tt> is
-- the corresponding input gradient.
invGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
invGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the inverse permutation of a tensor.
--
-- This operation computes the inverse of an index permutation. It takes
-- a 1-D integer tensor <tt>x</tt>, which represents the indices of a
-- zero-based array, and swaps each value with its index position. In
-- other words, for an output tensor <tt>y</tt> and an input tensor
-- <tt>x</tt>, this operation computes the following:
--
-- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
--
-- The values must include 0. There can be no duplicate values or
-- negative values.
--
-- For example:
--
-- ```prettyprint # tensor <tt>x</tt> is [3, 4, 0, 2, 1]
-- invert_permutation(x) ==&gt; [2, 4, 3, 0, 1] ```
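--
-- The same example as a runnable sketch (assuming <tt>constant</tt> and
-- <tt>runSession</tt> from the companion <tt>tensorflow-ops</tt> and
-- <tt>tensorflow</tt> packages; the function name is illustrative):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- invertPermutationExample :: IO (V.Vector Int32)
-- invertPermutationExample = TF.runSession $ do
--     let x = TF.constant (TF.Shape [5]) [3, 4, 0, 2, 1 :: Int32]
--     TF.run (CoreOps.invertPermutation x)  -- ==> [2, 4, 3, 0, 1]
-- ```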
invertPermutation :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor Build t
invertPermutation' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns which elements of x are finite.
--
-- numpy compatibility: equivalent to <tt>np.isfinite</tt>.
isFinite :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool
isFinite' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool
-- | Returns which elements of x are Inf.
--
-- numpy compatibility: equivalent to <tt>np.isinf</tt>.
isInf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool
isInf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool
-- | Returns which elements of x are NaN.
--
-- numpy compatibility: equivalent to <tt>np.isnan</tt>.
isNan :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool
isNan' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool
-- | Checks whether a tensor has been initialized.
--
-- Outputs boolean scalar indicating whether the tensor has been
-- initialized.
isVariableInitialized :: (MonadBuild m', TensorType dtype) => Tensor Ref dtype -> m' (Tensor Value Bool)
isVariableInitialized' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref dtype -> m' (Tensor Value Bool)
-- | L2 Loss.
--
-- Computes half the L2 norm of a tensor without the <a>sqrt</a>:
--
-- output = sum(t ** 2) / 2
l2Loss :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
l2Loss' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Local Response Normalization.
--
-- The 4-D <tt>input</tt> tensor is treated as a 3-D array of 1-D vectors
-- (along the last dimension), and each vector is normalized
-- independently. Within a given vector, each component is divided by the
-- weighted, squared sum of inputs within <tt>depth_radius</tt>. In
-- detail,
--
-- sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d +
-- depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) **
-- beta
--
-- For details, see <a>Krizhevsky et al., ImageNet classification with
-- deep convolutional neural networks (NIPS 2012)</a>.
lRN :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t
lRN' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Gradients for Local Response Normalization.
lRNGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
lRNGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Generates labels for candidate sampling with a learned unigram
-- distribution.
--
-- See explanations of candidate sampling and the data formats at
-- go/candidate-sampling.
--
-- For each batch, this op picks a single set of sampled candidate
-- labels.
--
-- The advantages of sampling candidates per-batch are simplicity and the
-- possibility of efficient dense matrix multiplication. The disadvantage
-- is that the sampled candidates must be chosen independently of the
-- context and of the true labels.
learnedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
learnedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
-- | Returns the truth value of (x &lt; y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Less</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
less :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
less' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
-- | Returns the truth value of (x &lt;= y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>LessEqual</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
lessEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
lessEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
-- | Computes the log of the absolute value of `Gamma(x)` element-wise.
lgamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
lgamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Generates values in an interval.
--
-- A sequence of <tt>num</tt> evenly-spaced values are generated
-- beginning at <tt>start</tt>. If `num &gt; 1`, the values in the
-- sequence increase by `(stop - start) / (num - 1)`, so that the last
-- one is exactly <tt>stop</tt>.
--
-- For example:
--
-- ``` tf.linspace(10.0, 12.0, 3, name="linspace") =&gt; [ 10.0 11.0
-- 12.0] ```
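--
-- The same example as a runnable sketch (assuming <tt>scalar</tt> and
-- <tt>runSession</tt> from the companion <tt>tensorflow-ops</tt> and
-- <tt>tensorflow</tt> packages; the function name is illustrative):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- linSpaceExample :: IO (V.Vector Float)
-- linSpaceExample = TF.runSession $ do
--     let start = TF.scalar (10 :: Float)
--         stop  = TF.scalar (12 :: Float)
--         num   = TF.scalar (3 :: Int32)
--     TF.run (CoreOps.linSpace start stop num)  -- ==> [10.0, 11.0, 12.0]
-- ```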
linSpace :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t
linSpace' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t
-- | Computes the difference between two lists of numbers or strings.
--
-- Given a list <tt>x</tt> and a list <tt>y</tt>, this operation returns
-- a list <tt>out</tt> that represents all values that are in <tt>x</tt>
-- but not in <tt>y</tt>. The returned list <tt>out</tt> is sorted in the
-- same order that the numbers appear in <tt>x</tt> (duplicates are
-- preserved). This operation also returns a list <tt>idx</tt> that
-- represents the position of each <tt>out</tt> element in <tt>x</tt>. In
-- other words:
--
-- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
--
-- For example, given this input:
--
-- ```prettyprint x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ```
--
-- This operation would return:
--
-- ```prettyprint out ==&gt; [2, 4, 6] idx ==&gt; [1, 3, 5] ```
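--
-- The same example as a runnable sketch; both outputs are fetched
-- together as a pair (companion-package assumptions as in the
-- <tt>gather</tt> example earlier; the function name is illustrative):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- listDiffExample :: IO (V.Vector Int32, V.Vector Int32)
-- listDiffExample = TF.runSession $ do
--     let x = TF.constant (TF.Shape [6]) [1, 2, 3, 4, 5, 6 :: Int32]
--         y = TF.constant (TF.Shape [3]) [1, 3, 5 :: Int32]
--     TF.run (CoreOps.listDiff x y)  -- ==> ([2, 4, 6], [1, 3, 5])
-- ```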
listDiff :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx)
listDiff' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx)
-- | Computes natural logarithm of x element-wise.
--
-- I.e., \(y = log_e x\).
log :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
log' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes natural logarithm of (1 + x) element-wise.
--
-- I.e., \(y = log_e (1 + x)\).
log1p :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
log1p' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes log softmax activations.
--
-- For each batch <tt>i</tt> and class <tt>j</tt> we have
--
-- logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
logSoftmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
logSoftmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Generates labels for candidate sampling with a log-uniform
-- distribution.
--
-- See explanations of candidate sampling and the data formats at
-- go/candidate-sampling.
--
-- For each batch, this op picks a single set of sampled candidate
-- labels.
--
-- The advantages of sampling candidates per-batch are simplicity and the
-- possibility of efficient dense matrix multiplication. The disadvantage
-- is that the sampled candidates must be chosen independently of the
-- context and of the true labels.
logUniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
logUniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
-- | Returns the truth value of x AND y element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>LogicalAnd</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
logicalAnd :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
logicalAnd' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
-- | Returns the truth value of NOT x element-wise.
logicalNot :: Tensor v'1 Bool -> Tensor Build Bool
logicalNot' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool
-- | Returns the truth value of x OR y element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>LogicalOr</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
logicalOr :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
logicalOr' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
-- | Outputs all keys and values in the table.
lookupTableExport :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues))
lookupTableExport' :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues))
-- | Looks up keys in a table, outputs the corresponding values.
--
-- The tensor <tt>keys</tt> must be of the same type as the keys of the
-- table. The output <tt>values</tt> is of the type of the table values.
--
-- The scalar <tt>default_value</tt> is the value output for keys not
-- present in the table. It must also be of the same type as the table
-- values.
lookupTableFind :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout)
lookupTableFind' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout)
-- | Replaces the contents of the table with the specified keys and values.
--
-- The tensor <tt>keys</tt> must be of the same type as the keys of the
-- table. The tensor <tt>values</tt> must be of the type of the table
-- values.
lookupTableImport :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode)
lookupTableImport' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode)
-- | Updates the table to associate keys with values.
--
-- The tensor <tt>keys</tt> must be of the same type as the keys of the
-- table. The tensor <tt>values</tt> must be of the type of the table
-- values.
lookupTableInsert :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode)
lookupTableInsert' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode)
-- | Computes the number of elements in the given table.
lookupTableSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64)
lookupTableSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64)
-- | Forwards the input to the output.
--
-- This operator represents the loop termination condition used by the
-- "pivot" switches of a loop.
loopCond :: Tensor v'1 Bool -> Tensor Build Bool
loopCond' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool
-- | Multiply the matrix "a" by the matrix "b".
--
-- The inputs must be two-dimensional matrices and the inner dimension of
-- "a" (after being transposed if transpose_a is true) must match the
-- outer dimension of "b" (after being transposed if transposed_b is
-- true).
--
-- <ul>
-- <li>Note*: The default kernel implementation for MatMul on GPUs uses
-- cublas.</li>
-- </ul>
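--
-- A minimal sketch (assuming <tt>constant</tt> and <tt>runSession</tt>
-- from the companion <tt>tensorflow-ops</tt> and <tt>tensorflow</tt>
-- packages; the function name is illustrative):
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- matMulExample :: IO (V.Vector Float)
-- matMulExample = TF.runSession $ do
--     let a = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--         b = TF.constant (TF.Shape [2, 2]) [5, 6, 7, 8 :: Float]
--     -- [[1,2],[3,4]] x [[5,6],[7,8]], fetched in row-major order:
--     TF.run (CoreOps.matMul a b)  -- ==> [19.0, 22.0, 43.0, 50.0]
-- ```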
matMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
matMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns the set of files matching a pattern.
--
-- Note that this routine only supports wildcard characters in the
-- basename portion of the pattern, not in the directory portion.
matchingFiles :: Tensor v'1 ByteString -> Tensor Build ByteString
matchingFiles' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
-- | Copy a tensor setting everything outside a central band in each
-- innermost matrix
--
-- to zero.
--
-- The <tt>band</tt> part is computed as follows: Assume <tt>input</tt>
-- has <tt>k</tt> dimensions `[I, J, K, ..., M, N]`, then the output is a
-- tensor with the same shape where
--
-- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m,
-- n]`.
--
-- The indicator function
--
-- `in_band(m, n) = (num_lower &lt; 0 || (m-n) &lt;= num_lower)
-- &amp;&amp; (num_upper &lt; 0 || (n-m) &lt;= num_upper)`.
--
-- For example:
--
-- ```prettyprint # if <tt>input</tt> is [[ 0, 1, 2, 3] [-1, 0, 1, 2]
-- [-2, -1, 0, 1] [-3, -2, -1, 0]],
--
-- tf.matrix_band_part(input, 1, -1) ==&gt; [[ 0, 1, 2, 3] [-1, 0, 1, 2]
-- [ 0, -1, 0, 1] [ 0, 0, -1, 0]],
--
-- tf.matrix_band_part(input, 2, 1) ==&gt; [[ 0, 1, 0, 0] [-1, 0, 1, 0]
-- [-2, -1, 0, 1] [ 0, -2, -1, 0]] ```
--
-- Useful special cases:
--
-- ```prettyprint tf.matrix_band_part(input, 0, -1) ==&gt; Upper
-- triangular part. tf.matrix_band_part(input, -1, 0) ==&gt; Lower
-- triangular part. tf.matrix_band_part(input, 0, 0) ==&gt; Diagonal. ```
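--
-- A minimal sketch of the "upper triangular" special case (assuming
-- <tt>constant</tt>, <tt>scalar</tt>, and <tt>runSession</tt> from the
-- companion <tt>tensorflow-ops</tt> and <tt>tensorflow</tt> packages;
-- the function name is illustrative):
--
-- ```haskell
-- import Data.Int (Int64)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- matrixBandPartExample :: IO (V.Vector Float)
-- matrixBandPartExample = TF.runSession $ do
--     let input    = TF.constant (TF.Shape [3, 3]) [1..9 :: Float]
--         numLower = TF.scalar (0 :: Int64)   -- keep no subdiagonals
--         numUpper = TF.scalar (-1 :: Int64)  -- keep all superdiagonals
--     -- Upper triangular part, row-major: [1,2,3, 0,5,6, 0,0,9]
--     TF.run (CoreOps.matrixBandPart input numLower numUpper)
-- ```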
matrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
matrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
-- | Computes the determinant of one or more square matrices.
--
-- The input is a tensor of shape `[..., M, M]` whose inner-most 2
-- dimensions form square matrices. The output is a tensor containing the
-- determinants for all input submatrices `[..., :, :]`.
matrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
matrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns a batched diagonal tensor with a given batched diagonal
-- values.
--
-- Given a <tt>diagonal</tt>, this operation returns a tensor with the
-- <tt>diagonal</tt> and everything else padded with zeros. The diagonal
-- is computed as follows:
--
-- Assume <tt>diagonal</tt> has <tt>k</tt> dimensions `[I, J, K, ...,
-- N]`, then the output is a tensor of rank `k+1` with dimensions `[I,
-- J, K, ..., N, N]` where:
--
-- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
--
-- For example:
--
-- ```prettyprint # <tt>diagonal</tt> is [[1, 2, 3, 4], [5, 6, 7, 8]]
--
-- and diagonal.shape = (2, 4)
--
-- tf.matrix_diag(diagonal) ==&gt; [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3,
-- 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0,
-- 8]]]
--
-- which has shape (2, 4, 4) ```
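--
-- A minimal sketch for a single (unbatched) diagonal (companion-package
-- assumptions as in the <tt>gather</tt> example earlier; the function
-- name is illustrative):
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- matrixDiagExample :: IO (V.Vector Float)
-- matrixDiagExample = TF.runSession $ do
--     let diagonal = TF.constant (TF.Shape [4]) [1, 2, 3, 4 :: Float]
--     -- A 4x4 matrix with [1,2,3,4] on the diagonal, zeros elsewhere.
--     TF.run (CoreOps.matrixDiag diagonal)
-- ```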
matrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t
matrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns the batched diagonal part of a batched tensor.
--
-- This operation returns a tensor with the <tt>diagonal</tt> part of the
-- batched <tt>input</tt>. The <tt>diagonal</tt> part is computed as
-- follows:
--
-- Assume <tt>input</tt> has <tt>k</tt> dimensions `[I, J, K, ..., M,
-- N]`, then the output is a tensor of rank `k - 1` with dimensions `[I,
-- J, K, ..., min(M, N)]` where:
--
-- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
--
-- The input must be at least a matrix.
--
-- For example:
--
-- ```prettyprint # <tt>input</tt> is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0,
-- 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0,
-- 8]]]
--
-- and input.shape = (2, 4, 4)
--
-- tf.matrix_diag_part(input) ==&gt; [[1, 2, 3, 4], [5, 6, 7, 8]]
--
-- which has shape (2, 4) ```
matrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t
matrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the inverse of one or more square invertible matrices or
-- their
--
-- adjoints (conjugate transposes).
--
-- The input is a tensor of shape `[..., M, M]` whose inner-most 2
-- dimensions form square matrices. The output is a tensor of the same
-- shape as the input containing the inverse for all input submatrices
-- `[..., :, :]`.
--
-- The op uses LU decomposition with partial pivoting to compute the
-- inverses.
--
-- If a matrix is not invertible there is no guarantee what the op does.
-- It may detect the condition and raise an exception or it may simply
-- return a garbage result.
matrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
matrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns a batched matrix tensor with new batched diagonal values.
--
-- Given <tt>input</tt> and <tt>diagonal</tt>, this operation returns a
-- tensor with the same shape and values as <tt>input</tt>, except for
-- the main diagonal of the innermost matrices. These will be overwritten
-- by the values in <tt>diagonal</tt>.
--
-- The output is computed as follows:
--
-- Assume <tt>input</tt> has `k+1` dimensions `[I, J, K, ..., M, N]` and
-- <tt>diagonal</tt> has <tt>k</tt> dimensions `[I, J, K, ..., min(M,
-- N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I,
-- J, K, ..., M, N]` where:
--
-- <ul>
-- <li>`output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m ==
-- n`.</li>
-- <li>`output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m !=
-- n`.</li>
-- </ul>
matrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
matrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Solves systems of linear equations.
--
-- <tt>Matrix</tt> is a tensor of shape `[..., M, M]` whose inner-most 2
-- dimensions form square matrices. <tt>Rhs</tt> is a tensor of shape
-- `[..., M, K]`. The <tt>output</tt> is a tensor of shape `[..., M, K]`. If
-- <tt>adjoint</tt> is <a>False</a> then each output matrix satisfies
-- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If
-- <tt>adjoint</tt> is <a>True</a> then each output matrix satisfies
-- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
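--
-- A minimal sketch solving a diagonal 2x2 system (companion-package
-- assumptions as in the <tt>gather</tt> example earlier; the function
-- name is illustrative):
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- matrixSolveExample :: IO (V.Vector Float)
-- matrixSolveExample = TF.runSession $ do
--     let matrix = TF.constant (TF.Shape [2, 2]) [2, 0, 0, 4 :: Float]
--         rhs    = TF.constant (TF.Shape [2, 1]) [2, 8 :: Float]
--     -- Solves [[2,0],[0,4]] * x = [[2],[8]], so x = [[1],[2]].
--     TF.run (CoreOps.matrixSolve matrix rhs)  -- ==> [1.0, 2.0]
-- ```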
matrixSolve :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
matrixSolve' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Solves one or more linear least-squares problems.
--
-- <tt>matrix</tt> is a tensor of shape `[..., M, N]` whose inner-most 2
-- dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape
-- `[..., M, K]`. The output is a tensor of shape `[..., N, K]` where
-- each output matrix solves each of the equations `matrix[..., :, :] *
-- output[..., :, :] = rhs[..., :, :]` in the least squares sense.
--
-- Below we will use the following notation for each pair of matrix and
-- right-hand sides in the batch:
--
-- <tt>matrix</tt>=\(A \in \Re^{m \times n}\), <tt>rhs</tt>=\(B \in
-- \Re^{m \times k}\), <tt>output</tt>=\(X \in \Re^{n \times k}\),
-- <tt>l2_regularizer</tt>=\(\lambda\).
--
-- If <tt>fast</tt> is <a>True</a>, then the solution is computed by
-- solving the normal equations using Cholesky decomposition.
-- Specifically, if \(m \ge n\) then \(X = (A^T A + \lambda I)^{-1} A^T
-- B\), which solves the least-squares problem \(X =
-- \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 + \lambda
-- ||Z||_F^2\). If \(m \lt n\) then <tt>output</tt> is computed as \(X =
-- A^T (A A^T + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the
-- minimum-norm solution to the under-determined linear system, i.e.
-- \(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2\), subject
-- to \(A Z = B\). Notice that the fast path is only numerically stable
-- when \(A\) is numerically full rank and has a condition number
-- \(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\) or
-- \(\lambda\) is sufficiently large.
--
-- If <tt>fast</tt> is <a>False</a> an algorithm based on the numerically
-- robust complete orthogonal decomposition is used. This computes the
-- minimum-norm least-squares solution, even when \(A\) is rank
-- deficient. This path is typically 6-7 times slower than the fast
-- path. If <tt>fast</tt> is <a>False</a> then <tt>l2_regularizer</tt>
-- is ignored.
matrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
matrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
-- | Solves systems of linear equations with upper or lower triangular
-- matrices by
--
-- backsubstitution.
--
-- <tt>matrix</tt> is a tensor of shape `[..., M, M]` whose inner-most 2
-- dimensions form square matrices. If <tt>lower</tt> is <a>True</a> then
-- the strictly upper triangular part of each inner-most matrix is
-- assumed to be zero and not accessed. If <tt>lower</tt> is False then
-- the strictly lower triangular part of each inner-most matrix is
-- assumed to be zero and not accessed. <tt>rhs</tt> is a tensor of shape
-- `[..., M, K]`.
--
-- The output is a tensor of shape `[..., M, K]`. If <tt>adjoint</tt> is
-- <a>False</a> then the innermost matrices in <tt>output</tt> satisfy
-- matrix equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :,
-- :]`. If <tt>adjoint</tt> is <a>True</a> then the innermost matrices
-- in <tt>output</tt> satisfy matrix equations `adjoint(matrix[..., i,
-- k]) * output[..., k, j] = rhs[..., i, j]`.
matrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
matrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the maximum of elements across dimensions of a tensor.
--
-- Reduces <tt>input</tt> along the dimensions given in
-- <tt>reduction_indices</tt>. Unless <tt>keep_dims</tt> is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- <tt>reduction_indices</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
max :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
max' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | Performs max pooling on the input.
maxPool :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t
maxPool' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Performs 3D max pooling on the input.
maxPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
maxPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes gradients of max pooling function.
maxPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t
maxPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t
-- | Computes gradients of the maxpooling function.
maxPoolGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
maxPoolGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Computes gradients of the maxpooling function.
maxPoolGradWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t
maxPoolGradWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t
-- | Performs max pooling on the input and outputs both max values and
-- indices.
--
-- The indices in <tt>argmax</tt> are flattened, so that a maximum value
-- at position `[b, y, x, c]` becomes flattened index `((b * height + y)
-- * width + x) * channels + c`.
maxPoolWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build targmax)
maxPoolWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build targmax)
-- | Returns the max of x and y (i.e. x &gt; y ? x : y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Maximum</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
maximum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
maximum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the mean of elements across dimensions of a tensor.
--
-- Reduces <tt>input</tt> along the dimensions given in
-- <tt>reduction_indices</tt>. Unless <tt>keep_dims</tt> is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- <tt>reduction_indices</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
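--
-- A minimal sketch taking column means (companion-package assumptions
-- as in the <tt>gather</tt> example earlier; the function name is
-- illustrative):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- meanExample :: IO (V.Vector Float)
-- meanExample = TF.runSession $ do
--     let x    = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--         axes = TF.constant (TF.Shape [1]) [0 :: Int32]
--     -- Reduces over axis 0 (column means of [[1,2],[3,4]]).
--     TF.run (CoreOps.mean x axes)  -- ==> [2.0, 3.0]
-- ```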
mean :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
mean' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | Forwards the value of an available tensor from <tt>inputs</tt> to
-- <tt>output</tt>.
--
-- <tt>Merge</tt> waits for at least one of the tensors in
-- <tt>inputs</tt> to become available. It is usually combined with
-- <tt>Switch</tt> to implement branching.
--
-- <tt>Merge</tt> forwards the first tensor to become available to
-- <tt>output</tt>, and sets <tt>value_index</tt> to its index in
-- <tt>inputs</tt>.
merge :: (TensorType t) => [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32)
merge' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32)
-- | Merges summaries.
--
-- This op creates a <a>`Summary`</a> protocol buffer that contains the
-- union of all the values in the input summaries.
--
-- When the Op is run, it reports an <tt>InvalidArgument</tt> error if
-- multiple values in the summaries to merge use the same tag.
mergeSummary :: [Tensor v'1 ByteString] -> Tensor Build ByteString
mergeSummary' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString
-- | V2 format specific: merges the metadata files of sharded checkpoints.
-- The
--
-- result is one logical checkpoint, with one physical metadata file and
-- renamed data files.
--
-- Intended for "grouping" multiple checkpoints in a sharded checkpoint
-- setup.
--
-- If delete_old_dirs is true, attempts to delete recursively the dirname
-- of each path in the input checkpoint_prefixes. This is useful when
-- those paths are non user-facing temporary locations.
mergeV2Checkpoints :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
mergeV2Checkpoints' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
-- | Computes the minimum of elements across dimensions of a tensor.
--
-- Reduces <tt>input</tt> along the dimensions given in
-- <tt>reduction_indices</tt>. Unless <tt>keep_dims</tt> is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- <tt>reduction_indices</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
min :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
min' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | Returns the min of x and y (i.e. x &lt; y ? x : y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Minimum</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
minimum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
minimum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Pads a tensor with mirrored values.
--
-- This operation pads <tt>input</tt> with mirrored values according to
-- the <tt>paddings</tt> you specify. <tt>paddings</tt> is an integer
-- tensor with shape `[n, 2]`, where n is the rank of <tt>input</tt>. For
-- each dimension D of <tt>input</tt>, `paddings[D, 0]` indicates how
-- many values to add before the contents of <tt>input</tt> in that
-- dimension, and `paddings[D, 1]` indicates how many values to add after
-- the contents of <tt>input</tt> in that dimension. Both `paddings[D,
-- 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)`
-- (or `input.dim_size(D) - 1`) if <tt>copy_border</tt> is true (if
-- false, respectively).
--
-- The padded size of each dimension D of the output is:
--
-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
--
-- For example:
--
-- ```prettyprint # <tt>t</tt> is [[1, 2, 3], [4, 5, 6]]. #
-- <tt>paddings</tt> is [[1, 1], [2, 2]]. # <tt>mode</tt> is SYMMETRIC.
-- # rank of <tt>t</tt> is 2. pad(t, paddings) ==&gt; [[2, 1, 1, 2, 3, 3,
-- 2] [2, 1, 1, 2, 3, 3, 2] [5, 4, 4, 5, 6, 6, 5] [5, 4, 4, 5, 6, 6, 5]]
-- ```
mirrorPad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
mirrorPad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
-- | Gradient op for <tt>MirrorPad</tt> op. This op folds a mirror-padded
-- tensor.
--
-- This operation folds the padded areas of <tt>input</tt> by
-- <tt>MirrorPad</tt> according to the <tt>paddings</tt> you specify.
-- <tt>paddings</tt> must be the same as <tt>paddings</tt> argument given
-- to the corresponding <tt>MirrorPad</tt> op.
--
-- The folded size of each dimension D of the output is:
--
-- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
--
-- For example:
--
-- ```prettyprint # <tt>t</tt> is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. #
-- <tt>paddings</tt> is [[0, 1], [0, 1]]. # <tt>mode</tt> is SYMMETRIC.
-- # rank of <tt>t</tt> is 2. pad(t, paddings) ==&gt; [[ 1, 5] [11, 28]]
-- ```
mirrorPadGrad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
mirrorPadGrad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
-- | Returns element-wise remainder of division.
--
-- <ul>
-- <li>NOTE*: <tt>Mod</tt> supports broadcasting. More about broadcasting
-- <a>here</a></li>
-- </ul>
mod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
mod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns x * y element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Mul</tt> supports broadcasting. More about broadcasting
-- <a>here</a></li>
-- </ul>
mul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
mul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Draws samples from a multinomial distribution.
multinomial :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64)
multinomial' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64)
-- | Creates an empty hash table that uses tensors as the backing store. It
-- uses
--
-- "open addressing" with quadratic reprobing to resolve collisions.
--
-- This op creates a mutable hash table, specifying the type of its keys
-- and values. Each value must be a scalar. Data can be inserted into the
-- table using the insert operations. It does not support the
-- initialization operation.
mutableDenseHashTable :: (MonadBuild m', TensorType key_dtype) => DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString)
mutableDenseHashTable' :: (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString)
-- | Creates an empty hash table.
--
-- This op creates a mutable hash table, specifying the type of its keys
-- and values. Each value must be a scalar. Data can be inserted into the
-- table using the insert operations. It does not support the
-- initialization operation.
mutableHashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString)
mutableHashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString)
-- | Creates an empty hash table.
--
-- This op creates a mutable hash table, specifying the type of its keys
-- and values. Each value must be a vector. Data can be inserted into the
-- table using the insert operations. It does not support the
-- initialization operation.
mutableHashTableOfTensors :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString)
mutableHashTableOfTensors' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString)
-- | Computes numerical negative value element-wise.
--
-- I.e., \(y = -x\).
neg :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
neg' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Training via negative sampling.
negTrain :: (MonadBuild m') => Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode)
negTrain' :: (MonadBuild m') => OpParams -> Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode)
-- | Makes its input available to the next iteration.
nextIteration :: (TensorType t) => Tensor v'1 t -> Tensor Build t
nextIteration' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Does nothing. Only useful as a placeholder for control edges.
noOp :: (MonadBuild m') => m' (ControlNode)
noOp' :: (MonadBuild m') => OpParams -> m' (ControlNode)
-- | Greedily selects a subset of bounding boxes in descending order of
-- score,
--
-- pruning away boxes that have high intersection-over-union (IOU)
-- overlap with previously selected boxes. Bounding boxes are supplied as
-- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of
-- any diagonal pair of box corners and the coordinates can be provided
-- as normalized (i.e., lying in the interval [0, 1]) or absolute. Note
-- that this algorithm is agnostic to where the origin is in the
-- coordinate system. Note that this algorithm is invariant to orthogonal
-- transformations and translations of the coordinate system; thus
-- translations or reflections of the coordinate system result in the same
-- boxes being selected by the algorithm.
--
-- The output of this operation is a set of integers indexing into the
-- input collection of bounding boxes representing the selected boxes.
-- The bounding box coordinates corresponding to the selected indices can
-- then be obtained using the `tf.gather` operation. For example:
--
-- selected_indices = tf.image.non_max_suppression( boxes, scores,
-- max_output_size, iou_threshold) selected_boxes = tf.gather(boxes,
-- selected_indices)
nonMaxSuppression :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32
nonMaxSuppression' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32
-- | Returns the truth value of (x != y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>NotEqual</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
notEqual :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
notEqual' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
-- | Returns a one-hot tensor.
--
-- The locations represented by indices in <tt>indices</tt> take value
-- <tt>on_value</tt>, while all other locations take value
-- <tt>off_value</tt>.
--
-- If the input <tt>indices</tt> is rank <tt>N</tt>, the output will have
-- rank `N+1`. The new axis is created at dimension <tt>axis</tt>
-- (default: the new axis is appended at the end).
--
-- If <tt>indices</tt> is a scalar the output shape will be a vector of
-- length <tt>depth</tt>.
--
-- If <tt>indices</tt> is a vector of length <tt>features</tt>, the
-- output shape will be:
--
-- ```
-- features x depth if axis == -1
-- depth x features if axis == 0
-- ```
--
-- If <tt>indices</tt> is a matrix (batch) with shape `[batch,
-- features]`, the output shape will be:
--
-- ```
-- batch x features x depth if axis == -1
-- batch x depth x features if axis == 1
-- depth x batch x features if axis == 0
-- ```
--
-- Examples
--
-- Suppose that
--
-- ```
-- indices = [0, 2, -1, 1]
-- depth = 3
-- on_value = 5.0
-- off_value = 0.0
-- axis = -1
-- ```
--
-- Then output is `[4 x 3]`:
--
-- ```
-- output =
--   [5.0 0.0 0.0]  // one_hot(0)
--   [0.0 0.0 5.0]  // one_hot(2)
--   [0.0 0.0 0.0]  // one_hot(-1)
--   [0.0 5.0 0.0]  // one_hot(1)
-- ```
--
-- Suppose that
--
-- ```
-- indices = [0, 2, -1, 1]
-- depth = 3
-- on_value = 0.0
-- off_value = 3.0
-- axis = 0
-- ```
--
-- Then output is `[3 x 4]`:
--
-- ```
-- output =
--   [0.0 3.0 3.0 3.0]
--   [3.0 3.0 3.0 0.0]
--   [3.0 0.0 3.0 3.0]
-- //  ^               one_hot(0)
-- //      ^           one_hot(2)
-- //          ^       one_hot(-1)
-- //              ^   one_hot(1)
-- ```
--
-- Suppose that
--
-- ```
-- indices = [[0, 2], [1, -1]]
-- depth = 3
-- on_value = 1.0
-- off_value = 0.0
-- axis = -1
-- ```
--
-- Then output is `[2 x 2 x 3]`:
--
-- ```
-- output =
--   [
--     [1.0, 0.0, 0.0]  // one_hot(0)
--     [0.0, 0.0, 1.0]  // one_hot(2)
--   ][
--     [0.0, 1.0, 0.0]  // one_hot(1)
--     [0.0, 0.0, 0.0]  // one_hot(-1)
--   ]
-- ```
oneHot :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
oneHot' :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
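-- The first example above, as a hedged Haskell sketch under the same
-- assumptions used elsewhere in these notes (TensorFlow.Ops helpers,
-- TensorFlow.Core session API, this module imported as CoreOps):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   result <- TF.runSession $ do
--     let indices = Ops.vector [0, 2, -1, 1 :: Int32]
--         depth   = Ops.scalar (3 :: Int32)
--         onVal   = Ops.scalar (5 :: Float)
--         offVal  = Ops.scalar (0 :: Float)
--     TF.run (CoreOps.oneHot indices depth onVal offVal)
--   -- Flattened [4 x 3] result: 5 0 0  0 0 5  0 0 0  0 5 0
--   print (result :: V.Vector Float)
-- ```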
-- | Packs a list of <tt>N</tt> rank-<tt>R</tt> tensors into one
-- rank-`(R+1)` tensor.
--
-- Packs the <tt>N</tt> tensors in <tt>values</tt> into a tensor with
-- rank one higher than each tensor in <tt>values</tt>, by packing them
-- along the <tt>axis</tt> dimension. Given a list of tensors of shape
-- `(A, B, C)`;
--
-- if `axis == 0` then the <tt>output</tt> tensor will have the shape
-- `(N, A, B, C)`. if `axis == 1` then the <tt>output</tt> tensor will
-- have the shape `(A, N, B, C)`. Etc.
--
-- For example:
--
-- ```prettyprint
-- # <tt>x</tt> is [1, 4]
-- # <tt>y</tt> is [2, 5]
-- # <tt>z</tt> is [3, 6]
-- pack([x, y, z]) =&gt; [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
-- pack([x, y, z], axis=1) =&gt; [[1, 2, 3], [4, 5, 6]]
-- ```
--
-- This is the opposite of <a>unpack</a>.
pack :: (TensorType t) => [Tensor v'1 t] -> Tensor Build t
pack' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> Tensor Build t
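-- A hedged sketch of the example above (assuming the TensorFlow.Ops
-- helpers and the TensorFlow.Core session API; this module imported as
-- CoreOps):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   packed <- TF.runSession $ do
--     let x = Ops.vector [1, 4 :: Float]
--         y = Ops.vector [2, 5 :: Float]
--         z = Ops.vector [3, 6 :: Float]
--     TF.run (CoreOps.pack [x, y, z])
--   -- Flattened [3, 2] result: 1 4 2 5 3 6
--   print (packed :: V.Vector Float)
-- ```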
-- | Pads a tensor with zeros.
--
-- This operation pads <tt>input</tt> with zeros according to the
-- <tt>paddings</tt> you specify. <tt>paddings</tt> is an integer tensor
-- with shape `[n, 2]`, where n is the rank of <tt>input</tt>. For each
-- dimension D of <tt>input</tt>, `paddings[D, 0]` indicates how many
-- zeros to add before the contents of <tt>input</tt> in that dimension,
-- and `paddings[D, 1]` indicates how many zeros to add after the
-- contents of <tt>input</tt> in that dimension.
--
-- The padded size of each dimension D of the output is:
--
-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
--
-- For example:
--
-- ```prettyprint
-- # <tt>t</tt> is [[1, 1], [2, 2]]
-- # <tt>paddings</tt> is [[1, 1], [2, 2]]
-- # rank of <tt>t</tt> is 2
-- pad(t, paddings) ==&gt; [[0, 0, 0, 0, 0, 0]
--                       [0, 0, 1, 1, 0, 0]
--                       [0, 0, 2, 2, 0, 0]
--                       [0, 0, 0, 0, 0, 0]]
-- ```
pad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
pad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
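-- The same example as a hedged Haskell sketch (TensorFlow.Ops helpers
-- and TensorFlow.Core session API assumed; this module as CoreOps):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   padded <- TF.runSession $ do
--     let t        = Ops.constant (TF.Shape [2, 2]) [1, 1, 2, 2 :: Float]
--         paddings = Ops.constant (TF.Shape [2, 2]) [1, 1, 2, 2 :: Int32]
--     TF.run (CoreOps.pad t paddings)
--   -- Flattened [4, 6] result, matching the example above.
--   print (padded :: V.Vector Float)
-- ```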
-- | A queue that produces elements in first-in first-out order.
--
-- Variable-size shapes are allowed by setting the corresponding shape
-- dimensions to 0 in the shape attr. In this case DequeueMany will pad
-- up to the maximum size of any given element in the minibatch. See
-- below for details.
paddingFIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString)
paddingFIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
-- | A queue that produces elements in first-in first-out order.
--
-- Variable-size shapes are allowed by setting the corresponding shape
-- dimensions to 0 in the shape attr. In this case DequeueMany will pad
-- up to the maximum size of any given element in the minibatch. See
-- below for details.
paddingFIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle)
paddingFIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle)
-- | Concatenates a list of <tt>N</tt> tensors along the first dimension.
--
-- The input tensors are all required to have size 1 in the first
-- dimension.
--
-- For example:
--
-- ```prettyprint
-- # <tt>x</tt> is [[1, 4]]
-- # <tt>y</tt> is [[2, 5]]
-- # <tt>z</tt> is [[3, 6]]
-- parallel_concat([x, y, z]) =&gt; [[1, 4], [2, 5], [3, 6]]
-- ```
--
-- The difference between concat and parallel_concat is that concat
-- requires all of the inputs to be computed before the operation begins,
-- but doesn't require that the input shapes be known during graph
-- construction. Parallel concat copies pieces of the input into the
-- output as they become available; in some situations this can provide a
-- performance benefit.
parallelConcat :: (TensorType t) => Shape -> [Tensor v'1 t] -> Tensor Build t
parallelConcat' :: (TensorType t) => OpParams -> Shape -> [Tensor v'1 t] -> Tensor Build t
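-- A hedged sketch of the example above, with the usual assumptions
-- (TensorFlow.Ops helpers, TensorFlow.Core session API, this module as
-- CoreOps); the <tt>Shape</tt> argument is taken to be the final shape
-- of the concatenated output:
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   out <- TF.runSession $ do
--     let x = Ops.constant (TF.Shape [1, 2]) [1, 4 :: Float]
--         y = Ops.constant (TF.Shape [1, 2]) [2, 5 :: Float]
--         z = Ops.constant (TF.Shape [1, 2]) [3, 6 :: Float]
--     TF.run (CoreOps.parallelConcat (TF.Shape [3, 2]) [x, y, z])
--   print (out :: V.Vector Float)  -- flattened [3, 2] result
-- ```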
-- | Outputs random values from a normal distribution. The parameters may
-- each be a
--
-- scalar which applies to the entire output, or a vector of length
-- shape[0] which stores the parameters for each batch.
parameterizedTruncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype)
parameterizedTruncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype)
-- | Transforms a vector of brain.Example protos (as strings) into typed
-- tensors.
parseExample :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense)
parseExample' :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense)
-- | Transforms a scalar brain.SequenceExample proto (as a string) into
-- typed tensors.
parseSingleSequenceExample :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types)
parseSingleSequenceExample' :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types)
-- | Transforms a serialized tensorflow.TensorProto proto into a Tensor.
parseTensor :: (TensorType out_type) => Tensor v'1 ByteString -> Tensor Build out_type
parseTensor' :: (TensorType out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type
-- | A placeholder op for a value that will be fed into the computation.
--
-- N.B. This operation will fail with an error if it is executed. It is
-- intended as a way to represent a value that will always be fed, and to
-- provide attrs that enable the fed value to be checked at runtime.
placeholder :: (TensorType dtype) => Tensor Build dtype
placeholder' :: (TensorType dtype) => OpParams -> Tensor Build dtype
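-- The generated <tt>placeholder</tt> here is a pure Build tensor; for
-- feeding it is often easier to go through the <tt>placeholder</tt>
-- wrapper in TensorFlow.Ops, which returns a rendered Value tensor. A
-- hedged sketch under that assumption (session API from
-- TensorFlow.Core, this module as CoreOps):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   ys <- TF.runSession $ do
--     -- Rendered placeholder of shape [3] from TensorFlow.Ops.
--     p <- Ops.placeholder (TF.Shape [3])
--     let fd = TF.encodeTensorData (TF.Shape [3])
--                                  (V.fromList [-1, 0, 2 :: Float])
--     TF.runWithFeeds [TF.feed p fd] (CoreOps.relu p)
--   print (ys :: V.Vector Float)  -- expect [0, 0, 2]
-- ```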
-- | A placeholder op for a value that will be fed into the computation.
--
-- N.B. This operation will fail with an error if it is executed. It is
-- intended as a way to represent a value that will always be fed, and to
-- provide attrs that enable the fed value to be checked at runtime.
placeholderV2 :: (TensorType dtype) => Shape -> Tensor Build dtype
placeholderV2' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype
-- | A placeholder op that passes through <tt>input</tt> when its output is
-- not fed.
placeholderWithDefault :: (TensorType dtype) => Shape -> Tensor v'1 dtype -> Tensor Build dtype
placeholderWithDefault' :: (TensorType dtype) => OpParams -> Shape -> Tensor v'1 dtype -> Tensor Build dtype
-- | Compute the polygamma function \(\psi^{(n)}(x)\).
--
-- The polygamma function is defined as:
--
-- \(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\)
--
-- where \(\psi(x)\) is the digamma function.
polygamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
polygamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the power of one value to another.
--
-- Given a tensor <tt>x</tt> and a tensor <tt>y</tt>, this operation
-- computes \(x^y\) for corresponding elements in <tt>x</tt> and
-- <tt>y</tt>. For example:
--
-- ``` # tensor <tt>x</tt> is [[2, 2], [3, 3]] # tensor <tt>y</tt> is
-- [[8, 16], [2, 3]] tf.pow(x, y) ==&gt; [[256, 65536], [9, 27]] ```
pow :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
pow' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
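-- The example above as a hedged Haskell sketch (TensorFlow.Ops helpers
-- and TensorFlow.Core session API assumed; this module as CoreOps):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   zs <- TF.runSession $ do
--     let x = Ops.constant (TF.Shape [2, 2]) [2, 2, 3, 3 :: Float]
--         y = Ops.constant (TF.Shape [2, 2]) [8, 16, 2, 3 :: Float]
--     TF.run (CoreOps.pow x y)
--   print (zs :: V.Vector Float)  -- expect [256, 65536, 9, 27]
-- ```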
-- | An identity op that triggers an error if a gradient is requested.
--
-- When executed in a graph, this op outputs its input tensor as-is.
--
-- When building ops to compute gradients, the TensorFlow gradient system
-- will return an error when trying to look up the gradient of this op,
-- because no gradient must ever be registered for this function. This op
-- exists to prevent subtle bugs from silently returning unimplemented
-- gradients in some corner cases.
preventGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t
preventGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Prints a list of tensors.
--
-- Passes <tt>input</tt> through to <tt>output</tt> and prints `data`
-- when evaluating.
print :: (MonadBuild m', TensorType t, TensorTypes u) => Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t)
print' :: (MonadBuild m', TensorType t, TensorTypes u) => OpParams -> Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t)
-- | A queue that produces elements sorted by the first component value.
--
-- Note that the PriorityQueue requires the first component of any
-- element to be a scalar int64, in addition to the other elements
-- declared by component_types. Therefore calls to Enqueue and
-- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will
-- all require (resp. output) one extra entry in their input (resp.
-- output) lists.
priorityQueue :: (MonadBuild m') => m' (Tensor Ref ByteString)
priorityQueue' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString)
-- | A queue that produces elements sorted by the first component value.
--
-- Note that the PriorityQueue requires the first component of any
-- element to be a scalar int64, in addition to the other elements
-- declared by component_types. Therefore calls to Enqueue and
-- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will
-- all require (resp. output) one extra entry in their input (resp.
-- output) lists.
priorityQueueV2 :: (MonadBuild m') => m' (ResourceHandle)
priorityQueueV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle)
-- | Computes the product of elements across dimensions of a tensor.
--
-- Reduces <tt>input</tt> along the dimensions given in
-- <tt>reduction_indices</tt>. Unless <tt>keep_dims</tt> is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- <tt>reduction_indices</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
prod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
prod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
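-- A short hedged sketch of a reduction over dimension 0 (assuming the
-- TensorFlow.Ops helpers and TensorFlow.Core session API; this module
-- imported as CoreOps):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   p <- TF.runSession $ do
--     let x = Ops.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--     -- Reduce over dimension 0: [1 * 3, 2 * 4].
--     TF.run (CoreOps.prod x (Ops.vector [0 :: Int32]))
--   print (p :: V.Vector Float)  -- expect [3, 8]
-- ```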
-- | Computes the QR decompositions of one or more matrices.
--
-- Computes the QR decomposition of each inner matrix in <tt>tensor</tt>
-- such that `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
--
-- ```prettyprint
-- # a is a tensor.
-- # q is a tensor of orthonormal matrices.
-- # r is a tensor of upper triangular matrices.
-- q, r = qr(a)
-- q_full, r_full = qr(a, full_matrices=True)
-- ```
qr :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t)
qr' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t)
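-- A hedged sketch fetching both outputs of the decomposition at once
-- (same assumptions as the other sketches; this module as CoreOps):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   (q, r) <- TF.runSession $ do
--     let a = Ops.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--     TF.run (CoreOps.qr a)
--   print (q :: V.Vector Float)
--   print (r :: V.Vector Float)
-- ```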
-- | Quantizes then dequantizes a tensor.
--
-- This op simulates the precision loss from the quantized forward pass
-- by: 1. Quantizing the tensor to fixed point numbers, which should
-- match the target quantization method when it is used in inference. 2.
-- Dequantizing it back to floating point numbers for the following ops,
-- most likely matmul.
--
-- There are different ways to quantize. This version does not use the
-- full range of the output type, choosing to elide the lowest possible
-- value for symmetry (e.g., output range is -127 to 127, not -128 to 127
-- for signed 8 bit quantization), so that 0.0 maps to 0.
--
-- To perform this op, we first find the range of values in our tensor.
-- The range we use is always centered on 0, so we find m such that
--
-- <ol>
-- <li>m = max(abs(input_min), abs(input_max)) if range_given is
-- true,</li>
-- <li>m = max(abs(min_elem(input)), abs(max_elem(input)))
-- otherwise.</li>
-- </ol>
--
-- Our input tensor range is then [-m, m].
--
-- Next, we choose our fixed-point quantization buckets, [min_fixed,
-- max_fixed]. If signed_input is true, this is
--
-- [min_fixed, max_fixed] = [-((1 &lt;&lt; (num_bits - 1)) - 1), (1
-- &lt;&lt; (num_bits - 1)) - 1].
--
-- Otherwise, if signed_input is false, the fixed-point range is
--
-- [min_fixed, max_fixed] = [0, (1 &lt;&lt; num_bits) - 1].
--
-- From this we compute our scaling factor, s:
--
-- s = (max_fixed - min_fixed) / (2 * m).
--
-- Now we can quantize and dequantize the elements of our tensor. An
-- element e is transformed into e':
--
-- e' = (e * s).round_to_nearest() / s.
--
-- Note that we have a different number of buckets in the signed vs.
-- unsigned cases. For example, if num_bits == 8, we get 254 buckets in
-- the signed case vs. 255 in the unsigned case.
--
-- For example, suppose num_bits = 8 and m = 1. Then
--
-- [min_fixed, max_fixed] = [-127, 127], and s = (127 + 127) / 2 = 127.
--
-- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63,
-- 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
quantizeAndDequantize :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
quantizeAndDequantize' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Convert the quantized <tt>input</tt> tensor into a lower-precision
-- <tt>output</tt>, using the
--
-- actual distribution of the values to maximize the usage of the lower
-- bit depth and adjusting the output min and max ranges accordingly.
--
-- [input_min, input_max] are scalar floats that specify the range for
-- the float interpretation of the <tt>input</tt> data. For example, if
-- input_min is -1.0f and input_max is 1.0f, and we are dealing with
-- quint16 quantized data, then a 0 value in the 16-bit data should be
-- interpreted as -1.0f, and a 65535 means 1.0f.
--
-- This operator tries to squeeze as much precision as possible into an
-- output with a lower bit depth by calculating the actual min and max
-- values found in the data. For example, maybe that quint16 input has no
-- values lower than 16,384 and none higher than 49,152. That means only
-- half the range is actually needed, all the float interpretations are
-- between -0.5f and 0.5f, so if we want to compress the data into a
-- quint8 output, we can use that range rather than the theoretical -1.0f
-- to 1.0f that is suggested by the input min and max.
--
-- In practice, this is most useful for taking output from operations
-- like QuantizedMatMul that can produce higher bit-depth outputs than
-- their inputs and may have large potential output ranges, but in
-- practice have a distribution of input values that only uses a small
-- fraction of the possible range. By feeding that output into this
-- operator, we can reduce it from 32 bits down to 8 with minimal loss of
-- accuracy.
quantizeDownAndShrinkRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizeDownAndShrinkRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Quantize the <tt>input</tt> tensor of type float to <tt>output</tt>
-- tensor of type <tt>T</tt>.
--
-- [min_range, max_range] are scalar floats that specify the range for
-- the <tt>input</tt> data. The <tt>mode</tt> attribute controls exactly
-- which calculations are used to convert the float values to their
-- quantized equivalents.
--
-- In <tt>MIN_COMBINED</tt> mode, each value of the tensor will undergo
-- the following:
--
-- ```
-- out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
-- if T == qint8, out[i] -= (range(T) + 1) / 2.0
-- ```
--
-- here `range(T) = numeric_limits&lt;T&gt;::max() -
-- numeric_limits&lt;T&gt;::min()`
--
-- MIN_COMBINED Mode Example:
--
-- Assume the input is type float and has a possible range of [0.0, 6.0]
-- and the output type is quint8 ([0, 255]). The min_range and max_range
-- values should be specified as 0.0 and 6.0. Quantizing from float to
-- quint8 will multiply each value of the input by 255/6 and cast to
-- quint8.
--
-- If the output type was qint8 ([-128, 127]), the operation will
-- additionally subtract each value by 128 prior to casting, so that the
-- range of values aligns with the range of qint8.
--
-- If the mode is <tt>MIN_FIRST</tt>, then this approach is used:
--
-- ```
-- number_of_steps = 1 &lt;&lt; (# of bits in T)
-- range_adjust = number_of_steps / (number_of_steps - 1)
-- range = (range_max - range_min) * range_adjust
-- range_scale = number_of_steps / range
-- quantized = round(input * range_scale) - round(range_min * range_scale)
--             + numeric_limits&lt;T&gt;::min()
-- quantized = max(quantized, numeric_limits&lt;T&gt;::min())
-- quantized = min(quantized, numeric_limits&lt;T&gt;::max())
-- ```
--
-- The biggest difference between this and MIN_COMBINED is that the
-- minimum range is rounded first, before it's subtracted from the
-- rounded value. With MIN_COMBINED, a small bias is introduced where
-- repeated iterations of quantizing and dequantizing will introduce a
-- larger and larger error.
--
-- One thing to watch out for is that the operator may choose to adjust
-- the requested minimum and maximum values slightly during the
-- quantization process, so you should always use the output ports as the
-- range for further calculations. For example, if the requested minimum
-- and maximum values are close to equal, they will be separated by a
-- small epsilon value to prevent ill-formed quantized buffers from being
-- created. Otherwise, you can end up with buffers where all the
-- quantized values map to the same float value, which causes problems
-- for operations that have to perform further calculations on them.
quantizeV2 :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizeV2' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Produces the average pool of the input tensor for quantized types.
quantizedAvgPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedAvgPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Quantized Batch normalization.
--
-- This op is deprecated and will be removed in the future. Prefer
-- `tf.nn.batch_normalization`.
quantizedBatchNormWithGlobalNormalization :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedBatchNormWithGlobalNormalization' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Adds Tensor <tt>bias</tt> to Tensor <tt>input</tt> for Quantized
-- types.
--
-- Broadcasts the values of bias on dimensions 0..N-2 of <tt>input</tt>.
quantizedBiasAdd :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedBiasAdd' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Concatenates quantized tensors along one dimension.
quantizedConcat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedConcat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Computes a 2D convolution given quantized 4D input and filter tensors.
--
-- The inputs are quantized tensors where the lowest value represents the
-- real number of the associated minimum, and the highest represents the
-- maximum. This means that you can only interpret the quantized output
-- in the same way, by taking the returned minimum and maximum values
-- into account.
quantizedConv2D :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedConv2D' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Quantized Instance normalization.
quantizedInstanceNorm :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedInstanceNorm' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Perform a quantized matrix multiplication of <tt>a</tt> by the matrix
-- <tt>b</tt>.
--
-- The inputs must be two-dimensional matrices and the inner dimension of
-- <tt>a</tt> (after being transposed if <tt>transpose_a</tt> is
-- non-zero) must match the outer dimension of <tt>b</tt> (after being
-- transposed if <tt>transpose_b</tt> is non-zero).
quantizedMatMul :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)
quantizedMatMul' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)
-- | Produces the max pool of the input tensor for quantized types.
quantizedMaxPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedMaxPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Computes Quantized Rectified Linear: `max(features, 0)`
quantizedRelu :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedRelu' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
quantizedRelu6 :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedRelu6' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Computes Quantized Rectified Linear X: `min(max(features, 0),
-- max_value)`
quantizedReluX :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedReluX' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Reshapes a quantized tensor as per the Reshape op.
quantizedReshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedReshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Closes the given queue.
--
-- This operation signals that no more elements will be enqueued in the
-- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent
-- Dequeue(Many) operations will continue to succeed if sufficient
-- elements remain in the queue. Subsequent Dequeue(Many) operations that
-- would block will fail immediately.
queueClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode)
queueClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode)
-- | Closes the given queue.
--
-- This operation signals that no more elements will be enqueued in the
-- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent
-- Dequeue(Many) operations will continue to succeed if sufficient
-- elements remain in the queue. Subsequent Dequeue(Many) operations that
-- would block will fail immediately.
queueCloseV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode)
queueCloseV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode)
-- | Dequeues a tuple of one or more tensors from the given queue.
--
-- This operation has k outputs, where k is the number of components in
-- the tuples stored in the given queue, and output i is the ith
-- component of the dequeued tuple.
--
-- N.B. If the queue is empty, this operation will block until an element
-- has been dequeued (or <tt>timeout_ms</tt> elapses, if specified).
queueDequeue :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> m' (TensorList (Value) component_types)
queueDequeue' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> m' (TensorList (Value) component_types)
-- | Dequeues n tuples of one or more tensors from the given queue.
--
-- If the queue is closed and there are fewer than n elements, then an
-- OutOfRange error is returned.
--
-- This operation concatenates queue-element component tensors along the
-- 0th dimension to make a single component tensor. All of the components
-- in the dequeued tuple will have size n in the 0th dimension.
--
-- This operation has k outputs, where k is the number of components in
-- the tuples stored in the given queue, and output i is the ith
-- component of the dequeued tuple.
--
-- N.B. If the queue is empty, this operation will block until n elements
-- have been dequeued (or <tt>timeout_ms</tt> elapses, if specified).
queueDequeueMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
queueDequeueMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
-- | Dequeues n tuples of one or more tensors from the given queue.
--
-- If the queue is closed and there are fewer than n elements, then an
-- OutOfRange error is returned.
--
-- This operation concatenates queue-element component tensors along the
-- 0th dimension to make a single component tensor. All of the components
-- in the dequeued tuple will have size n in the 0th dimension.
--
-- This operation has k outputs, where k is the number of components in
-- the tuples stored in the given queue, and output i is the ith
-- component of the dequeued tuple.
--
-- N.B. If the queue is empty, this operation will block until n elements
-- have been dequeued (or <tt>timeout_ms</tt> elapses, if specified).
queueDequeueManyV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
queueDequeueManyV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
-- | Dequeues n tuples of one or more tensors from the given queue.
--
-- This operation is not supported by all queues. If a queue does not
-- support DequeueUpTo, then an Unimplemented error is returned.
--
-- If the queue is closed and there are more than 0 but fewer than n
-- elements remaining, then instead of returning an OutOfRange error as
-- QueueDequeueMany does, fewer than <tt>n</tt> elements are returned
-- immediately. If the queue is closed and there are 0 elements left in
-- the queue, then an OutOfRange error is returned just like in
-- QueueDequeueMany. Otherwise the behavior is identical to
-- QueueDequeueMany:
--
-- This operation concatenates queue-element component tensors along the
-- 0th dimension to make a single component tensor. All of the components
-- in the dequeued tuple will have size n in the 0th dimension.
--
-- This operation has k outputs, where k is the number of components in
-- the tuples stored in the given queue, and output i is the ith
-- component of the dequeued tuple.
queueDequeueUpTo :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
queueDequeueUpTo' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
-- | Dequeues n tuples of one or more tensors from the given queue.
--
-- This operation is not supported by all queues. If a queue does not
-- support DequeueUpTo, then an Unimplemented error is returned.
--
-- If the queue is closed and there are more than 0 but fewer than n
-- elements remaining, then instead of returning an OutOfRange error as
-- QueueDequeueMany does, fewer than <tt>n</tt> elements are returned
-- immediately. If the queue is closed and there are 0 elements left in
-- the queue, then an OutOfRange error is returned just like in
-- QueueDequeueMany. Otherwise the behavior is identical to
-- QueueDequeueMany:
--
-- This operation concatenates queue-element component tensors along the
-- 0th dimension to make a single component tensor. All of the components
-- in the dequeued tuple will have size n in the 0th dimension.
--
-- This operation has k outputs, where k is the number of components in
-- the tuples stored in the given queue, and output i is the ith
-- component of the dequeued tuple.
queueDequeueUpToV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
queueDequeueUpToV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
-- | Dequeues a tuple of one or more tensors from the given queue.
--
-- This operation has k outputs, where k is the number of components in
-- the tuples stored in the given queue, and output i is the ith
-- component of the dequeued tuple.
--
-- N.B. If the queue is empty, this operation will block until an element
-- has been dequeued (or <tt>timeout_ms</tt> elapses, if specified).
queueDequeueV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> m' (TensorList (Value) component_types)
queueDequeueV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> m' (TensorList (Value) component_types)
-- | Enqueues a tuple of one or more tensors in the given queue.
--
-- The components input has k elements, which correspond to the
-- components of tuples stored in the given queue.
--
-- N.B. If the queue is full, this operation will block until the given
-- element has been enqueued (or <tt>timeout_ms</tt> elapses, if
-- specified).
queueEnqueue :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueue' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Enqueues zero or more tuples of one or more tensors in the given
-- queue.
--
-- This operation slices each component tensor along the 0th dimension to
-- make multiple queue elements. All of the tuple components must have
-- the same size in the 0th dimension.
--
-- The components input has k elements, which correspond to the
-- components of tuples stored in the given queue.
--
-- N.B. If the queue is full, this operation will block until the given
-- elements have been enqueued (or <tt>timeout_ms</tt> elapses, if
-- specified).
queueEnqueueMany :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueueMany' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Enqueues zero or more tuples of one or more tensors in the given
-- queue.
--
-- This operation slices each component tensor along the 0th dimension to
-- make multiple queue elements. All of the tuple components must have
-- the same size in the 0th dimension.
--
-- The components input has k elements, which correspond to the
-- components of tuples stored in the given queue.
--
-- N.B. If the queue is full, this operation will block until the given
-- elements have been enqueued (or <tt>timeout_ms</tt> elapses, if
-- specified).
queueEnqueueManyV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueueManyV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Enqueues a tuple of one or more tensors in the given queue.
--
-- The components input has k elements, which correspond to the
-- components of tuples stored in the given queue.
--
-- N.B. If the queue is full, this operation will block until the given
-- element has been enqueued (or <tt>timeout_ms</tt> elapses, if
-- specified).
queueEnqueueV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueueV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Computes the number of elements in the given queue.
queueSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32)
queueSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
-- | Computes the number of elements in the given queue.
queueSizeV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int32)
queueSizeV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int32)
-- | Converts one or more images from RGB to HSV.
--
-- Outputs a tensor of the same shape as the <tt>images</tt> tensor,
-- containing the HSV value of the pixels. The output is only well
-- defined if the values in <tt>images</tt> are in `[0,1]`.
--
-- `output[..., 0]` contains hue, `output[..., 1]` contains saturation,
-- and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A
-- hue of 0 corresponds to pure red, a hue of 1/3 is pure green, and 2/3
-- is pure blue.
rGBToHSV :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
rGBToHSV' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Randomly crop <tt>image</tt>.
--
-- <tt>size</tt> is a 1-D int64 tensor with 2 elements representing the
-- crop height and width. The values must be non-negative.
--
-- This Op picks a random location in <tt>image</tt> and crops a
-- <tt>height</tt> by <tt>width</tt> rectangle from that location. The
-- random location is picked so the cropped area will fit inside the
-- original image.
randomCrop :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t)
randomCrop' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t)
-- | Outputs random values from the Gamma distribution(s) described by
-- alpha.
--
-- This op uses the algorithm by Marsaglia et al. to acquire samples via
-- transformation-rejection from pairs of uniform and normal random
-- variables. See <a>http://dl.acm.org/citation.cfm?id=358414</a>
randomGamma :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t)
randomGamma' :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t)
-- | Randomly shuffles a tensor along its first dimension.
--
-- The tensor is shuffled along dimension 0, such that each `value[j]` is
-- mapped to one and only one `output[i]`. For example, a mapping that
-- might occur for a 3x2 tensor is:
--
-- ```prettyprint
-- [[1, 2],       [[5, 6],
--  [3, 4],  ==&gt;  [1, 2],
--  [5, 6]]        [3, 4]]
-- ```
randomShuffle :: (MonadBuild m', TensorType t) => Tensor v'1 t -> m' (Tensor Value t)
randomShuffle' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -> m' (Tensor Value t)
-- | A queue that randomizes the order of elements.
randomShuffleQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString)
randomShuffleQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
-- | A queue that randomizes the order of elements.
randomShuffleQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle)
randomShuffleQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle)
-- | Outputs random values from a normal distribution.
--
-- The generated values will have mean 0 and standard deviation 1.
randomStandardNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype)
randomStandardNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
-- | Outputs random values from a uniform distribution.
--
-- The generated values follow a uniform distribution in the range `[0,
-- 1)`. The lower bound 0 is included in the range, while the upper bound
-- 1 is excluded.
randomUniform :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype)
randomUniform' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
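-- Because this op is stateful, the generated wrapper is monadic: it is
-- built inside the Session (or any MonadBuild) rather than as a pure
-- Build tensor. A hedged sketch under the usual assumptions
-- (TensorFlow.Ops helpers, TensorFlow.Core session API, this module as
-- CoreOps):
--
-- ```
-- import Data.Int (Int64)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   xs <- TF.runSession $ do
--     -- Shape [2, 3]: six independent draws from [0, 1).
--     samples <- CoreOps.randomUniform (Ops.vector [2, 3 :: Int64])
--     TF.run samples
--   print (xs :: V.Vector Float)
-- ```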
-- | Outputs random integers from a uniform distribution.
--
-- The generated values are uniform integers in the range `[minval,
-- maxval)`. The lower bound <tt>minval</tt> is included in the range,
-- while the upper bound <tt>maxval</tt> is excluded.
--
-- The random integers are slightly biased unless `maxval - minval` is an
-- exact power of two. The bias is small for values of `maxval - minval`
-- significantly smaller than the range of the output (either `2^32` or
-- `2^64`).
randomUniformInt :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout)
randomUniformInt' :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout)
-- | Creates a sequence of numbers.
--
-- This operation creates a sequence of numbers that begins at
-- <tt>start</tt> and extends by increments of <tt>delta</tt> up to but
-- not including <tt>limit</tt>.
--
-- For example:
--
-- ``` # <tt>start</tt> is 3 # <tt>limit</tt> is 18 # <tt>delta</tt> is 3
-- tf.range(start, limit, delta) ==&gt; [3, 6, 9, 12, 15] ```
range :: (OneOf '[Int32, Int64, Double, Float] tidx) => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
range' :: (OneOf '[Int32, Int64, Double, Float] tidx) => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
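-- The example above as a hedged Haskell sketch (TensorFlow.Ops helpers
-- and TensorFlow.Core session API assumed; this module as CoreOps):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   xs <- TF.runSession $
--     TF.run (CoreOps.range (Ops.scalar (3 :: Int32))
--                           (Ops.scalar 18)
--                           (Ops.scalar 3))
--   print (xs :: V.Vector Int32)  -- expect [3, 6, 9, 12, 15]
-- ```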
-- | Returns the rank of a tensor.
--
-- This operation returns an integer representing the rank of
-- <tt>input</tt>.
--
-- For example:
--
-- ```prettyprint # <tt>t</tt> is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3],
-- [4, 4, 4]]] # shape of tensor <tt>t</tt> is [2, 2, 3] rank(t) ==&gt; 3
-- ```
--
-- Note: The rank of a tensor is not the same as the rank of a matrix.
-- The rank of a tensor is the number of indices required to uniquely
-- select each element of the tensor. Rank is also known as "order",
-- "degree", or "ndims."
rank :: (TensorType t) => Tensor v'1 t -> Tensor Build Int32
rank' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int32
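-- A hedged sketch of the example above; the scalar output is fetched
-- here as a one-element vector (same assumptions as the other sketches;
-- this module as CoreOps):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   r <- TF.runSession $ do
--     let t = Ops.constant (TF.Shape [2, 2, 3]) [1 .. 12 :: Float]
--     TF.run (CoreOps.rank t)
--   print (r :: V.Vector Int32)  -- expect [3]
-- ```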
-- | Reads and outputs the entire contents of the input filename.
readFile :: Tensor v'1 ByteString -> Tensor Build ByteString
readFile' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
-- | Reads the value of a variable.
--
-- The tensor returned by this operation is immutable.
--
-- The value returned by this operation is guaranteed to be influenced by
-- all the writes on which this operation depends directly or indirectly,
-- and to not be influenced by any of the writes which depend directly or
-- indirectly on this operation.
readVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> m' (Tensor Value dtype)
readVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> m' (Tensor Value dtype)
-- | Returns the number of records this Reader has produced.
--
-- This is the same as the number of ReaderRead executions that have
-- succeeded.
readerNumRecordsProduced :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64)
readerNumRecordsProduced' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64)
-- | Returns the number of records this Reader has produced.
--
-- This is the same as the number of ReaderRead executions that have
-- succeeded.
readerNumRecordsProducedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64)
readerNumRecordsProducedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64)
-- | Returns the number of work units this Reader has finished processing.
readerNumWorkUnitsCompleted :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64)
readerNumWorkUnitsCompleted' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64)
-- | Returns the number of work units this Reader has finished processing.
readerNumWorkUnitsCompletedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64)
readerNumWorkUnitsCompletedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64)
-- | Returns the next record (key, value pair) produced by a Reader.
--
-- Will dequeue from the input queue if necessary (e.g. when the Reader
-- needs to start reading from a new file since it has finished with the
-- previous file).
readerRead :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString))
readerRead' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString))
-- | Returns up to <tt>num_records</tt> (key, value) pairs produced by a
-- Reader.
--
-- Will dequeue from the input queue if necessary (e.g. when the Reader
-- needs to start reading from a new file since it has finished with the
-- previous file). It may return fewer than <tt>num_records</tt> even
-- before the last batch.
readerReadUpTo :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString))
readerReadUpTo' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString))
-- | Returns up to <tt>num_records</tt> (key, value) pairs produced by a
-- Reader.
--
-- Will dequeue from the input queue if necessary (e.g. when the Reader
-- needs to start reading from a new file since it has finished with the
-- previous file). It may return fewer than <tt>num_records</tt> even
-- before the last batch.
readerReadUpToV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString))
readerReadUpToV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString))
-- | Returns the next record (key, value pair) produced by a Reader.
--
-- Will dequeue from the input queue if necessary (e.g. when the Reader
-- needs to start reading from a new file since it has finished with the
-- previous file).
readerReadV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString))
readerReadV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString))
-- | Restore a Reader to its initial clean state.
readerReset :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode)
readerReset' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode)
-- | Restore a Reader to its initial clean state.
readerResetV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode)
readerResetV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode)
-- | Restore a reader to a previously saved state.
--
-- Not all Readers support being restored, so this can produce an
-- Unimplemented error.
readerRestoreState :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
readerRestoreState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
-- | Restore a reader to a previously saved state.
--
-- Not all Readers support being restored, so this can produce an
-- Unimplemented error.
readerRestoreStateV2 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode)
readerRestoreStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode)
-- | Produce a string tensor that encodes the state of a Reader.
--
-- Not all Readers support being serialized, so this can produce an
-- Unimplemented error.
readerSerializeState :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value ByteString)
readerSerializeState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value ByteString)
-- | Produce a string tensor that encodes the state of a Reader.
--
-- Not all Readers support being serialized, so this can produce an
-- Unimplemented error.
readerSerializeStateV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value ByteString)
readerSerializeStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value ByteString)
-- | Returns the real part of a complex number.
--
-- Given a tensor <tt>input</tt> of complex numbers, this operation
-- returns a tensor of type <tt>float</tt> that is the real part of each
-- element in <tt>input</tt>. All elements in <tt>input</tt> must be
-- complex numbers of the form \(a + bj\), where *a* is the real part
-- returned by this operation and *b* is the imaginary part.
--
-- For example:
--
-- ``` # tensor <tt>input</tt> is [-2.25 + 4.75j, 3.25 + 5.75j]
-- tf.real(input) ==&gt; [-2.25, 3.25] ```
real :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout
real' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout
-- | Returns x / y element-wise for real types.
--
-- If <tt>x</tt> and <tt>y</tt> are reals, this will return the
-- floating-point division.
--
-- NOTE: <tt>RealDiv</tt> supports broadcasting. More about broadcasting
-- <a>here</a>
realDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
realDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the reciprocal of x element-wise.
--
-- I.e., \(y = 1 / x\).
reciprocal :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
reciprocal' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient for the inverse of <tt>x</tt> wrt its input.
--
-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and <tt>dy</tt> is
-- the corresponding input gradient.
reciprocalGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
reciprocalGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Emits randomized records.
recordInput :: (MonadBuild m') => m' (Tensor Value ByteString)
recordInput' :: (MonadBuild m') => OpParams -> m' (Tensor Value ByteString)
-- | Joins a string Tensor across the given dimensions.
--
-- Computes the string join across dimensions in the given string Tensor
-- of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by
-- joining the input strings with the given separator (default: empty
-- string). Negative indices are counted backwards from the end, with
-- `-1` being equivalent to `n - 1`.
--
-- For example:
--
-- ```
-- # tensor <tt>a</tt> is [["a", "b"], ["c", "d"]]
-- tf.reduce_join(a, 0) ==&gt; ["ac", "bd"]
-- tf.reduce_join(a, 1) ==&gt; ["ab", "cd"]
-- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==&gt; ["ac", "bd"]
-- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==&gt; ["ab", "cd"]
-- tf.reduce_join(a, 0, keep_dims=True) ==&gt; [["ac", "bd"]]
-- tf.reduce_join(a, 1, keep_dims=True) ==&gt; [["ab"], ["cd"]]
-- tf.reduce_join(a, 0, separator=".") ==&gt; ["a.c", "b.d"]
-- tf.reduce_join(a, [0, 1]) ==&gt; ["acbd"]
-- tf.reduce_join(a, [1, 0]) ==&gt; ["abcd"]
-- tf.reduce_join(a, []) ==&gt; ["abcd"]
-- ```
reduceJoin :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
reduceJoin' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
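-- The first case above as a hedged Haskell sketch; OverloadedStrings is
-- enabled only for the ByteString literals (other assumptions as in the
-- earlier sketches; this module as CoreOps):
--
-- ```
-- {-# LANGUAGE OverloadedStrings #-}
-- import Data.ByteString (ByteString)
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as Ops
--
-- main :: IO ()
-- main = do
--   joined <- TF.runSession $ do
--     let a = Ops.constant (TF.Shape [2, 2])
--                          ["a", "b", "c", "d" :: ByteString]
--     TF.run (CoreOps.reduceJoin a (Ops.vector [0 :: Int32]))
--   print (joined :: V.Vector ByteString)  -- expect ["ac", "bd"]
-- ```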
-- | Creates or finds a child frame, and makes `data` available to the
-- child frame.
--
-- The unique <tt>frame_name</tt> is used by the <tt>Executor</tt> to
-- identify frames. If <tt>is_constant</tt> is true, <tt>output</tt> is a
-- constant in the child frame; otherwise it may be changed in the child
-- frame. At most <tt>parallel_iterations</tt> iterations are run in
-- parallel in the child frame.
refEnter :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refEnter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Exits the current frame to its parent frame.
--
-- Exit makes its input `data` available to the parent frame.
refExit :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refExit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Return the same ref tensor as the input ref tensor.
refIdentity :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refIdentity' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Forwards the value of an available tensor from <tt>inputs</tt> to
-- <tt>output</tt>.
--
-- <tt>Merge</tt> waits for at least one of the tensors in
-- <tt>inputs</tt> to become available. It is usually combined with
-- <tt>Switch</tt> to implement branching.
--
-- <tt>Merge</tt> forwards the first tensor to become available to
-- <tt>output</tt>, and sets <tt>value_index</tt> to its index in
-- <tt>inputs</tt>.
refMerge :: (MonadBuild m', TensorType t) => [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32))
refMerge' :: (MonadBuild m', TensorType t) => OpParams -> [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32))
-- | Makes its input available to the next iteration.
refNextIteration :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refNextIteration' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Forwards the <tt>index</tt>th element of <tt>inputs</tt> to
-- <tt>output</tt>.
refSelect :: (MonadBuild m', TensorType t) => Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t)
refSelect' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t)
-- | Forwards the ref tensor `data` to the output port determined by
-- <a>pred</a>.
--
-- If <a>pred</a> is true, the `data` input is forwarded to
-- <tt>output_true</tt>. Otherwise, the data goes to
-- <tt>output_false</tt>.
--
-- See also <tt>Switch</tt> and <tt>Merge</tt>.
refSwitch :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t))
refSwitch' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t))
-- | Computes rectified linear: `max(features, 0)`.
relu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
relu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
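-- For instance, a hedged sketch under the same companion-package
-- assumptions as the sketches above:
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--   -- Negative entries clamp to 0; non-negative entries pass through.
--   r <- TF.runSession $ TF.run $
--          CoreOps.relu (TF.vector [-2, -0.5, 0, 3 :: Float])
--   print (r :: V.Vector Float)  -- fromList [0.0,0.0,0.0,3.0]
-- ```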
-- | Computes rectified linear 6: `min(max(features, 0), 6)`.
relu6 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
relu6' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes rectified linear 6 gradients for a Relu6 operation.
relu6Grad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
relu6Grad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes rectified linear gradients for a Relu operation.
reluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
reluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Given a quantized tensor described by (input, input_min, input_max),
-- outputs a
--
-- range that covers the actual values present in that tensor. This op is
-- typically used to produce the requested_output_min and
-- requested_output_max for Requantize.
requantizationRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float)
requantizationRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float)
-- | Convert the quantized <tt>input</tt> tensor into a lower-precision
-- <tt>output</tt>, using the
--
-- output range specified with <tt>requested_output_min</tt> and
-- <tt>requested_output_max</tt>.
--
-- <ul>
-- <li><i>input_min, input_max</i> are scalar floats that specify the
-- range for the float interpretation of the <tt>input</tt> data. For
-- example, if input_min is -1.0f and input_max is 1.0f, and we are
-- dealing with quint16 quantized data, then a 0 value in the 16-bit data
-- should be interpreted as -1.0f, and a 65535 means 1.0f.</li>
-- </ul>
requantize :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
requantize' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Reshapes a tensor.
--
-- Given <tt>tensor</tt>, this operation returns a tensor that has the
-- same values as <tt>tensor</tt> with shape <a>shape</a>.
--
-- If one component of <a>shape</a> is the special value -1, the size of
-- that dimension is computed so that the total size remains constant. In
-- particular, a <a>shape</a> of `[-1]` flattens into 1-D. At most one
-- component of <a>shape</a> can be -1.
--
-- If <a>shape</a> is 1-D or higher, then the operation returns a tensor
-- with shape <a>shape</a> filled with the values of <tt>tensor</tt>. In
-- this case, the number of elements implied by <a>shape</a> must be the
-- same as the number of elements in <tt>tensor</tt>.
--
-- For example:
--
-- ```prettyprint # tensor <tt>t</tt> is [1, 2, 3, 4, 5, 6, 7, 8, 9] #
-- tensor <tt>t</tt> has shape [9] reshape(t, [3, 3]) ==&gt; [[1, 2, 3],
-- [4, 5, 6], [7, 8, 9]]
--
-- # tensor <tt>t</tt> is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor
-- <tt>t</tt> has shape [2, 2, 2] reshape(t, [2, 4]) ==&gt; [[1, 1, 2,
-- 2], [3, 3, 4, 4]]
--
-- # tensor <tt>t</tt> is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4,
-- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor <tt>t</tt> has shape [3,
-- 2, 3] # pass '[-1]' to flatten <tt>t</tt> reshape(t, [-1]) ==&gt; [1,
-- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
--
-- # -1 can also be used to infer the shape
--
-- # -1 is inferred to be 9: reshape(t, [2, -1]) ==&gt; [[1, 1, 1, 2, 2,
-- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2:
-- reshape(t, [-1, 9]) ==&gt; [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5,
-- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3])
-- ==&gt; [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6,
-- 6, 6]]]
--
-- # tensor <tt>t</tt> is [7] # shape `[]` reshapes to a scalar
-- reshape(t, []) ==&gt; 7 ```
reshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t
reshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t
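-- The first example above, as a hedged Haskell sketch (assuming
-- <tt>TF.constant</tt> and <tt>TF.vector</tt> from the companion
-- <tt>tensorflow-ops</tt> package; fetching flattens the result to a
-- row-major vector):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--   -- Reshape a 9-element vector into a 3x3 matrix.
--   r <- TF.runSession $ TF.run $
--          CoreOps.reshape
--            (TF.constant (TF.Shape [9]) [1 .. 9 :: Float])
--            (TF.vector [3, 3 :: Int32])
--   print (r :: V.Vector Float)  -- fromList [1.0 .. 9.0], now of shape [3,3]
-- ```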
-- | Resize <tt>images</tt> to <a>size</a> using area interpolation.
--
-- Input images can be of different types but output images are always
-- float.
resizeArea :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
resizeArea' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
-- | Resize <tt>images</tt> to <a>size</a> using bicubic interpolation.
--
-- Input images can be of different types but output images are always
-- float.
resizeBicubic :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
resizeBicubic' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
-- | Resize <tt>images</tt> to <a>size</a> using bilinear interpolation.
--
-- Input images can be of different types but output images are always
-- float.
resizeBilinear :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
resizeBilinear' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
-- | Computes the gradient of bilinear interpolation.
resizeBilinearGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t
resizeBilinearGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t
-- | Resize <tt>images</tt> to <a>size</a> using nearest neighbor
-- interpolation.
resizeNearestNeighbor :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
resizeNearestNeighbor' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
-- | Computes the gradient of nearest neighbor interpolation.
resizeNearestNeighborGrad :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
resizeNearestNeighborGrad' :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
-- | Update '*var' according to the adadelta scheme.
--
-- accum = rho * accum + (1 - rho) * grad.square(); update = (update_accum +
-- epsilon).sqrt() * (accum + epsilon).rsqrt() * grad; update_accum = rho *
-- update_accum + (1 - rho) * update.square(); var -= update;
resourceApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode)
resourceApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode)
-- | Update '*var' according to the adagrad scheme.
--
-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum))
resourceApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode)
resourceApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode)
-- | Update '*var' according to the proximal adagrad scheme.
resourceApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode)
resourceApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode)
-- | Update '*var' according to the Adam algorithm.
--
-- lr_t &lt;- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t &lt;-
-- beta1 * m_{t-1} + (1 - beta1) * g_t v_t &lt;- beta2 * v_{t-1} + (1 -
-- beta2) * g_t * g_t variable &lt;- variable - lr_t * m_t / (sqrt(v_t) +
-- epsilon)
resourceApplyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode)
resourceApplyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode)
-- | Update '*var' according to the centered RMSProp algorithm.
--
-- The centered RMSProp algorithm uses an estimate of the centered second
-- moment (i.e., the variance) for normalization, as opposed to regular
-- RMSProp, which uses the (uncentered) second moment. This often helps
-- with training, but is slightly more expensive in terms of computation
-- and memory.
--
-- Note that in dense implementation of this algorithm, mg, ms, and mom
-- will update even if the grad is zero, but in this sparse
-- implementation, mg, ms, and mom will not update in iterations during
-- which the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- mean_grad = decay * mean_grad + (1-decay) * gradient
--
-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon -
-- mean_grad ** 2)
--
-- mg &lt;- rho * mg_{t-1} + (1-rho) * grad ms &lt;- rho * ms_{t-1} +
-- (1-rho) * grad * grad mom &lt;- momentum * mom_{t-1} + lr * grad /
-- sqrt(ms - mg * mg + epsilon) var &lt;- var - mom
resourceApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
resourceApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
-- | Update '*var' according to the Ftrl-proximal scheme.
--
-- accum_new = accum + grad * grad linear += grad +
-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0
-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 -
-- linear) / quadratic if |linear| &gt; l1 else 0.0 accum = accum_new
resourceApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
resourceApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
-- | Update '*var' by subtracting <tt>alpha</tt> * <tt>delta</tt> from it.
resourceApplyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode)
resourceApplyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode)
-- | Update '*var' according to the momentum scheme. Set use_nesterov =
-- True if you
--
-- want to use Nesterov momentum.
--
-- accum = accum * momentum + grad var -= lr * accum
resourceApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
resourceApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning
-- rate.
--
-- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var
-- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
resourceApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode)
resourceApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode)
-- | Update '*var' as FOBOS algorithm with fixed learning rate.
--
-- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) *
-- max{|prox_v|-alpha*l1,0}
resourceApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
resourceApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
-- | Update '*var' according to the RMSProp algorithm.
--
-- Note that in dense implementation of this algorithm, ms and mom will
-- update even if the grad is zero, but in this sparse implementation, ms
-- and mom will not update in iterations during which the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta =
-- learning_rate * gradient / sqrt(mean_square + epsilon)
--
-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad mom &lt;- momentum *
-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var &lt;- var - mom
resourceApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
resourceApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
-- | Gather slices from the variable pointed to by <tt>resource</tt>
-- according to <tt>indices</tt>.
--
-- <tt>indices</tt> must be an integer tensor of any dimension (usually
-- 0-D or 1-D). Produces an output tensor with shape `indices.shape +
-- params.shape[1:]` where:
--
-- ```python # Scalar indices output[:, ..., :] = params[indices, :, ...
-- :]
--
-- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :]
--
-- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i,
-- ..., j], :, ..., :] ```
resourceGather :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype)
resourceGather' :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype)
-- | Adds sparse updates to the variable referenced by <tt>resource</tt>.
--
-- This operation computes
--
-- ```
-- # Scalar indices
-- ref[indices, ...] += updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] += updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
-- ```
--
-- Duplicate entries are handled correctly: if multiple <tt>indices</tt>
-- reference the same location, their contributions add.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/ScatterAdd.png" alt</a> <a>/div</a>
resourceScatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode)
resourceScatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode)
-- | var: Should be from a Variable().
resourceSparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode)
resourceSparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode)
-- | Update relevant entries in '*var' and '*accum' according to the
-- adagrad scheme.
--
-- That is for rows we have grad for, we update var and accum as follows:
-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum))
resourceSparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode)
resourceSparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode)
-- | Update entries in '*var' and '*accum' according to the proximal
-- adagrad scheme.
resourceSparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode)
resourceSparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode)
-- | Update '*var' according to the centered RMSProp algorithm.
--
-- The centered RMSProp algorithm uses an estimate of the centered second
-- moment (i.e., the variance) for normalization, as opposed to regular
-- RMSProp, which uses the (uncentered) second moment. This often helps
-- with training, but is slightly more expensive in terms of computation
-- and memory.
--
-- Note that in dense implementation of this algorithm, mg, ms, and mom
-- will update even if the grad is zero, but in this sparse
-- implementation, mg, ms, and mom will not update in iterations during
-- which the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- mean_grad = decay * mean_grad + (1-decay) * gradient Delta =
-- learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad **
-- 2)
--
-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad mom &lt;- momentum *
-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var &lt;- var - mom
resourceSparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode)
resourceSparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode)
-- | Update relevant entries in '*var' according to the Ftrl-proximal
-- scheme.
--
-- That is for rows we have grad for, we update var, accum and linear as
-- follows: accum_new = accum + grad * grad linear += grad +
-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0
-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 -
-- linear) / quadratic if |linear| &gt; l1 else 0.0 accum = accum_new
resourceSparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
resourceSparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
-- | Update relevant entries in '*var' and '*accum' according to the
-- momentum scheme.
--
-- Set use_nesterov = True if you want to use Nesterov momentum.
--
-- That is for rows we have grad for, we update var and accum as follows:
--
-- accum = accum * momentum + grad var -= lr * accum
resourceSparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode)
resourceSparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode)
-- | Sparse update entries in '*var' and '*accum' according to FOBOS
-- algorithm.
--
-- That is for rows we have grad for, we update var and accum as follows:
-- accum += grad * grad prox_v = var prox_v -= lr * grad * (1 /
-- sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
resourceSparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode)
resourceSparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode)
-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
--
-- That is for rows we have grad for, we update var as follows: prox_v =
-- var - alpha * grad var = sign(prox_v)/(1+alpha*l2) *
-- max{|prox_v|-alpha*l1,0}
resourceSparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode)
resourceSparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode)
-- | Update '*var' according to the RMSProp algorithm.
--
-- Note that in dense implementation of this algorithm, ms and mom will
-- update even if the grad is zero, but in this sparse implementation, ms
-- and mom will not update in iterations during which the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta =
-- learning_rate * gradient / sqrt(mean_square + epsilon)
--
-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad mom &lt;- momentum *
-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var &lt;- var - mom
resourceSparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode)
resourceSparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode)
-- | Restores a tensor from checkpoint files.
--
-- Reads a tensor stored in one or several files. If there are several
-- files (for instance because a tensor was saved as slices),
-- <tt>file_pattern</tt> may contain wildcard symbols (<a>*</a> and
-- <tt>?</tt>) in the filename portion only, not in the directory
-- portion.
--
-- If a <tt>file_pattern</tt> matches several files,
-- <tt>preferred_shard</tt> can be used to hint in which file the
-- requested tensor is likely to be found. This op will first open the
-- file at index <tt>preferred_shard</tt> in the list of matching files
-- and try to restore tensors from that file. Only if some tensors or
-- tensor slices are not found in that first file, then the Op opens all
-- the files. Setting <tt>preferred_shard</tt> to match the value passed
-- as the <tt>shard</tt> input of a matching <tt>Save</tt> Op may speed
-- up Restore. This attribute only affects performance, not correctness.
-- The default value -1 means files are processed in order.
--
-- See also <tt>RestoreSlice</tt>.
restore :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt
restore' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt
-- | Restores a tensor from checkpoint files.
--
-- This is like <tt>Restore</tt> except that restored tensor can be
-- listed as filling only a slice of a larger tensor.
-- <tt>shape_and_slice</tt> specifies the shape of the larger tensor and
-- the slice that the restored tensor covers.
--
-- The <tt>shape_and_slice</tt> input has the same format as the elements
-- of the <tt>shapes_and_slices</tt> input of the <tt>SaveSlices</tt> op.
restoreSlice :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt
restoreSlice' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt
-- | Restores tensors from a V2 checkpoint.
--
-- For backward compatibility with the V1 format, this Op currently
-- allows restoring from a V1 checkpoint as well: - This Op first
-- attempts to find the V2 index file pointed to by "prefix", and if
-- found proceeds to read it as a V2 checkpoint; - Otherwise the V1 read
-- path is invoked. Relying on this behavior is not recommended, as the
-- ability to fall back to read V1 might be deprecated and eventually
-- removed.
--
-- By default, restores the named tensors in full. If the caller wishes
-- to restore specific slices of stored tensors, "shape_and_slices"
-- should be non-empty strings and correspondingly well-formed.
--
-- Callers must ensure all the named tensors are indeed stored in the
-- checkpoint.
restoreV2 :: (TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes
restoreV2' :: (TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes
-- | Reverses specific dimensions of a tensor.
--
-- Given a <tt>tensor</tt>, and a <tt>bool</tt> tensor <tt>dims</tt>
-- representing the dimensions of <tt>tensor</tt>, this operation
-- reverses each dimension i of <tt>tensor</tt> where `dims[i]` is
-- <a>True</a>.
--
-- <tt>tensor</tt> can have up to 8 dimensions. The number of dimensions
-- of <tt>tensor</tt> must equal the number of elements in <tt>dims</tt>.
-- In other words:
--
-- `rank(tensor) = size(dims)`
--
-- For example:
--
-- ```prettyprint # tensor <tt>t</tt> is [[[[ 0, 1, 2, 3], # [ 4, 5, 6,
-- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], #
-- [20, 21, 22, 23]]]] # tensor <tt>t</tt> shape is [1, 2, 3, 4]
--
-- # <tt>dims</tt> is [False, False, False, True] reverse(t, dims) ==&gt;
-- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12],
-- [19, 18, 17, 16], [23, 22, 21, 20]]]]
--
-- # <tt>dims</tt> is [False, True, False, False] reverse(t, dims) ==&gt;
-- [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, 1, 2,
-- 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]]
--
-- # <tt>dims</tt> is [False, False, True, False] reverse(t, dims) ==&gt;
-- [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, 23], [16,
-- 17, 18, 19], [12, 13, 14, 15]]]] ```
reverse :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t
reverse' :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t
-- | Reverses variable length slices.
--
-- This op first slices <tt>input</tt> along the dimension
-- <tt>batch_dim</tt>, and for each slice <tt>i</tt>, reverses the first
-- `seq_lengths[i]` elements along the dimension <tt>seq_dim</tt>.
--
-- The elements of <tt>seq_lengths</tt> must obey `seq_lengths[i] &lt;
-- input.dims[seq_dim]`, and <tt>seq_lengths</tt> must be a vector of
-- length `input.dims[batch_dim]`.
--
-- The output slice <tt>i</tt> along dimension <tt>batch_dim</tt> is then
-- given by input slice <tt>i</tt>, with the first `seq_lengths[i]`
-- slices along dimension <tt>seq_dim</tt> reversed.
--
-- For example:
--
-- ```prettyprint # Given this: batch_dim = 0 seq_dim = 1 input.dims =
-- (4, 8, ...) seq_lengths = [7, 2, 3, 5]
--
-- # then slices of input are reversed on seq_dim, but only up to
-- seq_lengths: output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
-- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] output[2, 0:3, :,
-- ...] = input[2, 3:0:-1, :, ...] output[3, 0:5, :, ...] = input[3,
-- 5:0:-1, :, ...]
--
-- # while entries past seq_lens are copied through: output[0, 7:, :,
-- ...] = input[0, 7:, :, ...] output[1, 2:, :, ...] = input[1, 2:, :,
-- ...] output[2, 3:, :, ...] = input[2, 3:, :, ...] output[3, 2:, :,
-- ...] = input[3, 2:, :, ...] ```
--
-- In contrast, if:
--
-- ```prettyprint # Given this: batch_dim = 2 seq_dim = 0 input.dims =
-- (8, ?, 4, ...) seq_lengths = [7, 2, 3, 5]
--
-- # then slices of input are reversed on seq_dim, but only up to
-- seq_lengths: output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
-- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] output[0:3, :,
-- 2, :, ...] = input[3:0:-1, :, 2, :, ...] output[0:5, :, 3, :, ...] =
-- input[5:0:-1, :, 3, :, ...]
--
-- # while entries past seq_lens are copied through: output[7:, :, 0, :,
-- ...] = input[7:, :, 0, :, ...] output[2:, :, 1, :, ...] = input[2:, :,
-- 1, :, ...] output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
-- output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] ```
reverseSequence :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t
reverseSequence' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t
-- | Reverses specific dimensions of a tensor.
--
-- NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
-- `tf.reverse_v2` is currently an alias that will be deprecated before
-- TF 1.0.
--
-- Given a <tt>tensor</tt> and an <tt>int32</tt> tensor <tt>axis</tt>
-- representing the set of dimensions of <tt>tensor</tt> to reverse, this
-- operation reverses each dimension <tt>i</tt> for which there exists
-- <tt>j</tt> s.t. `axis[j] == i`.
--
-- <tt>tensor</tt> can have up to 8 dimensions. <tt>axis</tt> may specify 0
-- or more dimensions. If an index is specified more than once, an
-- InvalidArgument error is raised.
--
-- For example:
--
-- ```prettyprint # tensor <tt>t</tt> is [[[[ 0, 1, 2, 3], # [ 4, 5, 6,
-- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], #
-- [20, 21, 22, 23]]]] # tensor <tt>t</tt> shape is [1, 2, 3, 4]
--
-- # <tt>dims</tt> is [3] or <tt>dims</tt> is -1 reverse(t, dims) ==&gt;
-- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12],
-- [19, 18, 17, 16], [23, 22, 21, 20]]]]
--
-- # <tt>dims</tt> is '[1]' (or <tt>dims</tt> is '[-3]') reverse(t, dims)
-- ==&gt; [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0,
-- 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]]
--
-- # <tt>dims</tt> is '[2]' (or <tt>dims</tt> is '[-2]') reverse(t, dims)
-- ==&gt; [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22,
-- 23], [16, 17, 18, 19], [12, 13, 14, 15]]]] ```
reverseV2 :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
reverseV2' :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
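-- A hedged sketch reversing axis 1 of a 2x3 matrix (companion-package
-- helpers assumed as in the sketches above):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--   -- t = [[1, 2, 3], [4, 5, 6]]; reverse each row (axis 1).
--   r <- TF.runSession $ TF.run $
--          CoreOps.reverseV2
--            (TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float])
--            (TF.vector [1 :: Int32])
--   print (r :: V.Vector Float)  -- fromList [3.0,2.0,1.0,6.0,5.0,4.0]
-- ```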
-- | Returns element-wise integer closest to x.
--
-- If the result is midway between two representable values, the even
-- representable value is chosen. For example:
--
-- ``` rint(-1.5) ==&gt; -2.0 rint(0.5000001) ==&gt; 1.0 rint([-1.7,
-- -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==&gt; [-2., -2., -0., 0., 2., 2.,
-- 2.] ```
rint :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
rint' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Rounds the values of a tensor to the nearest integer, element-wise.
--
-- Rounds half to even; also known as banker's rounding. If you want to
-- round according to the current system rounding mode, use std::rint.
round :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
round' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
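-- The half-to-even behavior, as a hedged sketch under the same
-- companion-package assumptions as above:
--
-- ```haskell
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--   -- Ties round to the nearest even integer: 0.5 -> 0, 1.5 -> 2, 2.5 -> 2.
--   r <- TF.runSession $ TF.run $
--          CoreOps.round (TF.vector [0.5, 1.5, 2.5, -0.5 :: Float])
--   print (r :: V.Vector Float)  -- fromList [0.0,2.0,2.0,-0.0]
-- ```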
-- | Computes reciprocal of square root of x element-wise.
--
-- I.e., \(y = 1 / \sqrt{x}\).
rsqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
rsqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient for the rsqrt of <tt>x</tt> wrt its input.
--
-- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and
-- <tt>dy</tt> is the corresponding input gradient.
rsqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
rsqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Generate a single randomly distorted bounding box for an image.
--
-- Bounding box annotations are often supplied in addition to
-- ground-truth labels in image recognition or object localization tasks.
-- A common technique for training such a system is to randomly distort
-- an image while preserving its content, i.e. *data augmentation*. This
-- Op outputs a randomly distorted localization of an object, i.e.
-- bounding box, given an <tt>image_size</tt>, <tt>bounding_boxes</tt>
-- and a series of constraints.
--
-- The output of this Op is a single bounding box that may be used to
-- crop the original image. The output is returned as 3 tensors:
-- <tt>begin</tt>, <a>size</a> and <tt>bboxes</tt>. The first 2 tensors
-- can be fed directly into `tf.slice` to crop the image. The latter may
-- be supplied to `tf.image.draw_bounding_boxes` to visualize what the
-- bounding box looks like.
--
-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max,
-- x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]`
-- relative to the width and height of the underlying image.
--
-- For example,
--
-- ```python # Generate a single distorted bounding box. begin, size,
-- bbox_for_draw = tf.image.sample_distorted_bounding_box(
-- tf.shape(image), bounding_boxes=bounding_boxes)
--
-- # Draw the bounding box in an image summary. image_with_box =
-- tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw)
-- tf.image_summary(<tt>images_with_box</tt>, image_with_box)
--
-- # Employ the bounding box to distort the image. distorted_image =
-- tf.slice(image, begin, size) ```
--
-- Note that if no bounding box information is available, setting
-- `use_image_if_no_bounding_boxes = true` will assume there is a single
-- implicit bounding box covering the whole image. If
-- <tt>use_image_if_no_bounding_boxes</tt> is false and no bounding boxes
-- are supplied, an error is raised.
sampleDistortedBoundingBox :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float))
sampleDistortedBoundingBox' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float))
-- | Saves the input tensors to disk.
--
-- The size of <tt>tensor_names</tt> must match the number of tensors in
-- `data`. `data[i]` is written to <tt>filename</tt> with name
-- `tensor_names[i]`.
--
-- See also <tt>SaveSlices</tt>.
save :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode)
save' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode)
-- | Saves input tensors slices to disk.
--
-- This is like <tt>Save</tt> except that tensors can be listed in the
-- saved file as being a slice of a larger tensor.
-- <tt>shapes_and_slices</tt> specifies the shape of the larger tensor
-- and the slice that this tensor covers. <tt>shapes_and_slices</tt> must
-- have as many elements as <tt>tensor_names</tt>.
--
-- Elements of the <tt>shapes_and_slices</tt> input must either be:
--
-- <ul>
-- <li>The empty string, in which case the corresponding tensor is saved
-- normally.</li>
-- <li>A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
-- <tt>dimI</tt> are the dimensions of the larger tensor and `slice-spec`
-- specifies what part is covered by the tensor to save.</li>
-- </ul>
--
-- `slice-spec` itself is a <tt>:</tt>-separated list:
-- `slice0:slice1:...:sliceN-1` where each <tt>sliceI</tt> is either:
--
-- <ul>
-- <li>The string <a>-</a> meaning that the slice covers all indices of
-- this dimension</li>
-- <li>`start,length` where <tt>start</tt> and <a>length</a> are
-- integers. In that case the slice covers <a>length</a> indices starting
-- at <tt>start</tt>.</li>
-- </ul>
--
-- See also <tt>Save</tt>.
saveSlices :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode)
saveSlices' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode)
-- | Saves tensors in V2 checkpoint format.
--
-- By default, saves the named tensors in full. If the caller wishes to
-- save specific slices of full tensors, "shape_and_slices" should be
-- non-empty strings and correspondingly well-formed.
saveV2 :: (MonadBuild m', TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode)
saveV2' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode)
-- | Outputs a <tt>Summary</tt> protocol buffer with scalar values.
--
-- The input <tt>tags</tt> and <tt>values</tt> must have the same shape.
-- The generated summary has a summary value for each tag-value pair in
-- <tt>tags</tt> and <tt>values</tt>.
scalarSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
scalarSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
-- | Adds sparse updates to a variable reference.
--
-- This operation computes
--
-- ```
-- # Scalar indices
-- ref[indices, ...] += updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] += updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
-- ```
--
-- This operation outputs <tt>ref</tt> after the update is done. This
-- makes it easier to chain operations that need to use the updated value.
--
-- Duplicate entries are handled correctly: if multiple <tt>indices</tt>
-- reference the same location, their contributions add.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/ScatterAdd.png" alt</a> <a>/div</a>
scatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
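-- A hedged sketch against a mutable variable (assuming
-- <tt>TF.initializedVariable</tt> and the other helpers come from the
-- companion <tt>tensorflow</tt> and <tt>tensorflow-ops</tt> packages):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--   r <- TF.runSession $ do
--     -- var = [1, 2, 3, 4]; add 10 at indices 0 and 2.
--     var     <- TF.initializedVariable (TF.vector [1, 2, 3, 4 :: Float])
--     updated <- CoreOps.scatterAdd var (TF.vector [0, 2 :: Int32])
--                                       (TF.vector [10, 10 :: Float])
--     TF.run updated
--   print (r :: V.Vector Float)  -- fromList [11.0,2.0,13.0,4.0]
-- ```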
-- | Divides a variable reference by sparse updates.
--
-- This operation computes
--
-- ```
-- # Scalar indices
-- ref[indices, ...] /= updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] /= updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
-- ```
--
-- This operation outputs <tt>ref</tt> after the update is done. This
-- makes it easier to chain operations that need to use the updated value.
--
-- Duplicate entries are handled correctly: if multiple <tt>indices</tt>
-- reference the same location, their contributions divide.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
scatterDiv :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterDiv' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Multiplies sparse updates into a variable reference.
--
-- This operation computes
--
-- ```
-- # Scalar indices
-- ref[indices, ...] *= updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] *= updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
-- ```
--
-- This operation outputs <tt>ref</tt> after the update is done. This
-- makes it easier to chain operations that need to use the updated value.
--
-- Duplicate entries are handled correctly: if multiple <tt>indices</tt>
-- reference the same location, their contributions multiply.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
scatterMul :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterMul' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Creates a new tensor by applying sparse <tt>updates</tt> to individual
--
-- values or slices within a zero tensor of the given <a>shape</a> tensor
-- according to indices. This operator is the inverse of the
-- <a>tf.gather_nd</a> operator which extracts values or slices from a
-- given tensor.
--
-- TODO(simister): Add a link to Variable.__getitem__ documentation on
-- slice syntax.
--
-- <a>shape</a> is a <tt>TensorShape</tt> with rank <tt>P</tt> and
-- <tt>indices</tt> is a <a>Tensor</a> of rank <tt>Q</tt>.
--
-- <tt>indices</tt> must be an integer tensor containing indices into
-- <a>shape</a>. It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt;
-- K &lt;= P`.
--
-- The innermost dimension of <tt>indices</tt> (with length <tt>K</tt>)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- &lt; P`) along the <tt>K</tt>th dimension of <a>shape</a>.
--
-- <tt>updates</tt> is a Tensor of rank `Q-1+P-K` with shape:
--
-- ``` [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]]. ```
--
-- The simplest form of scatter is to insert individual elements in a
-- tensor by index. For example, say we want to insert 4 scattered
-- elements in a rank-1 tensor with 8 elements.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/ScatterNd1.png" alt</a> <a>/div</a>
--
-- In Python, this scatter operation would look like this:
--
-- ```python
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- shape = tf.constant([8])
-- scatter = tf.scatter_nd(indices, updates, shape)
-- with tf.Session() as sess:
--   print sess.run(scatter)
-- ```
--
-- The resulting tensor would look like this:
--
-- [0, 11, 0, 10, 9, 0, 0, 12]
--
-- We can also insert entire slices of a higher-rank tensor all at once.
-- For example, we can insert two slices in the first dimension of a
-- rank-3 tensor with two matrices of new values.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/ScatterNd2.png" alt</a> <a>/div</a>
--
-- In Python, this scatter operation would look like this:
--
-- ```python
-- indices = tf.constant([[0], [2]])
-- updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
--                         [7, 7, 7, 7], [8, 8, 8, 8]],
--                        [[5, 5, 5, 5], [6, 6, 6, 6],
--                         [7, 7, 7, 7], [8, 8, 8, 8]]])
-- shape = tf.constant([4, 4, 4])
-- scatter = tf.scatter_nd(indices, updates, shape)
-- with tf.Session() as sess:
--   print sess.run(scatter)
-- ```
--
-- The resulting tensor would look like this:
--
-- ```
-- [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
--  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
--  [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
--  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
-- ```
scatterNd :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t
scatterNd' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t
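-- The first Python example above, as a hedged Haskell sketch
-- (companion-package helpers assumed as before):
--
-- ```haskell
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
-- import qualified TensorFlow.Ops as TF
--
-- main :: IO ()
-- main = do
--   -- Scatter [9, 10, 11, 12] into positions [4, 3, 1, 7] of a zero
--   -- vector of shape [8].
--   r <- TF.runSession $ TF.run $
--          CoreOps.scatterNd
--            (TF.constant (TF.Shape [4, 1]) [4, 3, 1, 7 :: Int32])
--            (TF.vector [9, 10, 11, 12 :: Float])
--            (TF.vector [8 :: Int32])
--   print (r :: V.Vector Float)  -- fromList [0.0,11.0,0.0,10.0,9.0,0.0,0.0,12.0]
-- ```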
-- | Applies sparse addition between <tt>updates</tt> and individual values
-- or slices
--
-- within a given variable according to <tt>indices</tt>.
--
-- <tt>ref</tt> is a <a>Tensor</a> with rank <tt>P</tt> and
-- <tt>indices</tt> is a <a>Tensor</a> of rank <tt>Q</tt>.
--
-- <tt>indices</tt> must be an integer tensor containing indices into
-- <tt>ref</tt>. It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt;
-- K &lt;= P`.
--
-- The innermost dimension of <tt>indices</tt> (with length <tt>K</tt>)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- &lt; P`) along the <tt>K</tt>th dimension of <tt>ref</tt>.
--
-- <tt>updates</tt> is a <a>Tensor</a> of rank `Q-1+P-K` with shape:
--
-- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```
--
-- For example, say we want to add 4 scattered elements to a rank-1
-- tensor with 8 elements. In Python, that addition would look like this:
--
-- ```python
-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- add = tf.scatter_nd_add(ref, indices, updates)
-- with tf.Session() as sess:
--   print sess.run(add)
-- ```
--
-- The resulting update to ref would look like this:
--
-- [1, 13, 3, 14, 14, 6, 7, 20]
--
-- See <a>tf.scatter_nd</a> for more details about how to make updates to
-- slices.
scatterNdAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterNdAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Applies sparse subtraction between <tt>updates</tt> and individual
-- values or slices
--
-- within a given variable according to <tt>indices</tt>.
--
-- <tt>ref</tt> is a <a>Tensor</a> with rank <tt>P</tt> and
-- <tt>indices</tt> is a <a>Tensor</a> of rank <tt>Q</tt>.
--
-- <tt>indices</tt> must be an integer tensor containing indices into
-- <tt>ref</tt>. It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 &lt;
-- K &lt;= P`.
--
-- The innermost dimension of <tt>indices</tt> (with length <tt>K</tt>)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- &lt; P`) along the <tt>K</tt>th dimension of <tt>ref</tt>.
--
-- <tt>updates</tt> is a <a>Tensor</a> of rank `Q-1+P-K` with shape:
--
-- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```
--
-- For example, say we want to subtract 4 scattered elements from a
-- rank-1 tensor with 8 elements. In Python, that subtraction would look
-- like this:
--
-- ```python
-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- sub = tf.scatter_nd_sub(ref, indices, updates)
-- with tf.Session() as sess:
--   print sess.run(sub)
-- ```
--
-- The resulting update to ref would look like this:
--
-- [1, -9, 3, -6, -4, 6, 7, -4]
--
-- See <a>tf.scatter_nd</a> for more details about how to make updates to
-- slices.
scatterNdSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterNdSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Applies sparse <tt>updates</tt> to individual values or slices within
-- a given
--
-- variable according to <tt>indices</tt>.
--
-- <tt>ref</tt> is a <a>Tensor</a> with rank <tt>P</tt> and
-- <tt>indices</tt> is a <a>Tensor</a> of rank <tt>Q</tt>.
--
-- <tt>indices</tt> must be an integer tensor, containing indices into
-- <tt>ref</tt>. It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0
-- &lt; K &lt;= P`.
--
-- The innermost dimension of <tt>indices</tt> (with length <tt>K</tt>)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- &lt; P`) along the <tt>K</tt>th dimension of <tt>ref</tt>.
--
-- <tt>updates</tt> is a <a>Tensor</a> of rank `Q-1+P-K` with shape:
--
-- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```
--
-- For example, say we want to update 4 scattered elements in a rank-1
-- tensor with 8 elements. In Python, that update would look like this:
--
-- ```prettyprint
-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- update = tf.scatter_nd_update(ref, indices, updates)
-- with tf.Session() as sess:
--   print sess.run(update)
-- ```
--
-- The resulting update to ref would look like this:
--
-- [1, 11, 3, 10, 9, 6, 7, 12]
--
-- See <a>tf.scatter_nd</a> for more details about how to make updates to
-- slices.
scatterNdUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterNdUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Subtracts sparse updates to a variable reference.
--
-- # Scalar indices
-- ref[indices, ...] -= updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] -= updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
--
-- This operation outputs <tt>ref</tt> after the update is done. This
-- makes it easier to chain operations that need to use the reset value.
--
-- Duplicate entries are handled correctly: if multiple <tt>indices</tt>
-- reference the same location, their (negated) contributions add.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/ScatterSub.png" alt</a> <a>/div</a>
scatterSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Applies sparse updates to a variable reference.
--
-- This operation computes
--
-- # Scalar indices
-- ref[indices, ...] = updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] = updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
--
-- This operation outputs <tt>ref</tt> after the update is done. This
-- makes it easier to chain operations that need to use the reset value.
--
-- If values in <tt>ref</tt> are to be updated more than once, because
-- there are duplicate entries in <tt>indices</tt>, the order in which
-- the updates happen for each value is undefined.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/ScatterUpdate.png" alt</a> <a>/div</a>
scatterUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Computes fingerprints of the input strings.
sdcaFprint :: Tensor v'1 ByteString -> Tensor Build Int64
sdcaFprint' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Int64
-- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA)
-- optimizer for
--
-- linear models with L1 + L2 regularization. As the global optimization
-- objective is strongly-convex, the optimizer optimizes the dual
-- objective at each step. The optimizer applies each update one example
-- at a time. Examples are sampled uniformly, and the optimizer is
-- learning-rate free and enjoys a linear convergence rate.
--
-- Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai;
-- Zhang, Tong. 2012. arXiv:1211.2717:
-- <a>http://arxiv.org/pdf/1211.2717v1.pdf</a>
--
-- Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|
--
-- Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin
-- Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik,
-- Martin Takac <a>http://arxiv.org/abs/1502.03508</a>
--
-- Stochastic Dual Coordinate Ascent with Adaptive Probabilities Dominik
-- Csiba, Zheng Qu, Peter Richtarik
-- <a>https://arxiv.org/abs/1502.08053</a>
sdcaOptimizer :: Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])
sdcaOptimizer' :: OpParams -> Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])
-- | Applies L1 regularization shrink step on the parameters.
sdcaShrinkL1 :: (MonadBuild m') => Float -> Float -> [Tensor Ref Float] -> m' (ControlNode)
sdcaShrinkL1' :: (MonadBuild m') => OpParams -> Float -> Float -> [Tensor Ref Float] -> m' (ControlNode)
-- | Computes the maximum along segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \max_j(data_j)\) where
-- <tt>max</tt> is over <tt>j</tt> such that `segment_ids[j] == i`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/SegmentMax.png" alt</a> <a>/div</a>
segmentMax :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentMax' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the mean along segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \frac{\sum_j data_j}{N}\)
-- where <tt>mean</tt> is over <tt>j</tt> such that `segment_ids[j] ==
-- i` and <tt>N</tt> is the total number of values summed.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/SegmentMean.png" alt</a> <a>/div</a>
segmentMean :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentMean' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the minimum along segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \min_j(data_j)\) where
-- <tt>min</tt> is over <tt>j</tt> such that `segment_ids[j] == i`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/SegmentMin.png" alt</a> <a>/div</a>
segmentMin :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentMin' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the product along segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \prod_j data_j\) where the
-- product is over <tt>j</tt> such that `segment_ids[j] == i`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/SegmentProd.png" alt</a> <a>/div</a>
segmentProd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentProd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the sum along segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \sum_j data_j\) where the
-- sum is over <tt>j</tt> such that `segment_ids[j] == i`.
--
-- <a>style="width:70%; margin:auto; margin-bottom:10px;
-- margin-top:20px;"</a> <a>style="width:100%"
-- src="../../images/SegmentSum.png" alt</a> <a>/div</a>
segmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Selects elements from <tt>t</tt> or <tt>e</tt>, depending on
-- <tt>condition</tt>.
--
-- The <tt>t</tt>, and <tt>e</tt> tensors must all have the same shape,
-- and the output will also have that shape.
--
-- The <tt>condition</tt> tensor must be a scalar if <tt>t</tt> and
-- <tt>e</tt> are scalars. If <tt>t</tt> and <tt>e</tt> are vectors or
-- higher rank, then <tt>condition</tt> must be either a scalar, a vector
-- with size matching the first dimension of <tt>t</tt>, or must have the
-- same shape as <tt>t</tt>.
--
-- The <tt>condition</tt> tensor acts as a mask that chooses, based on
-- the value at each element, whether the corresponding element / row in
-- the output should be taken from <tt>t</tt> (if true) or <tt>e</tt> (if
-- false).
--
-- If <tt>condition</tt> is a vector and <tt>t</tt> and <tt>e</tt> are
-- higher rank matrices, then it chooses which row (outer dimension) to
-- copy from <tt>t</tt> and <tt>e</tt>. If <tt>condition</tt> has the
-- same shape as <tt>t</tt> and <tt>e</tt>, then it chooses which element
-- to copy from <tt>t</tt> and <tt>e</tt>.
--
-- For example:
--
-- ```prettyprint
-- # <tt>condition</tt> tensor is [[True, False]
-- #                               [False, True]]
-- # <tt>t</tt> is [[1, 2],
-- #                [3, 4]]
-- # <tt>e</tt> is [[5, 6],
-- #                [7, 8]]
-- select(condition, t, e) ==&gt; [[1, 6],
--                                [7, 4]]
--
-- # <tt>condition</tt> tensor is [True, False]
-- # <tt>t</tt> is [[1, 2],
-- #                [3, 4]]
-- # <tt>e</tt> is [[5, 6],
-- #                [7, 8]]
-- select(condition, t, e) ==&gt; [[1, 2],
--                                [7, 8]]
-- ```
select :: (TensorType t) => Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
select' :: (TensorType t) => OpParams -> Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Computes the Eigen Decomposition of a batch of square self-adjoint
-- matrices.
--
-- The input is a tensor of shape `[..., M, M]` whose inner-most 2
-- dimensions form square matrices, with the same constraints as the
-- single matrix SelfAdjointEig.
--
-- The result is a [..., M+1, M] matrix with [..., 0,:] containing the
-- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
selfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
selfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the eigen decomposition of one or more square self-adjoint
-- matrices.
--
-- Computes the eigenvalues and (optionally) eigenvectors of each inner
-- matrix in <tt>input</tt> such that `input[..., :, :] = v[..., :, :] *
-- diag(e[..., :])`.
--
-- ```prettyprint
-- # a is a tensor.
-- # e is a tensor of eigenvalues.
-- # v is a tensor of eigenvectors.
-- e, v = self_adjoint_eig(a)
-- e = self_adjoint_eig(a, compute_v=False)
-- ```
selfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t)
selfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t)
-- | Serialize an <tt>N</tt>-minibatch <tt>SparseTensor</tt> into an `[N,
-- 3]` string <a>Tensor</a>.
--
-- The <tt>SparseTensor</tt> must have rank <tt>R</tt> greater than 1,
-- and the first dimension is treated as the minibatch dimension.
-- Elements of the <tt>SparseTensor</tt> must be sorted in increasing
-- order of this first dimension. The serialized <tt>SparseTensor</tt>
-- objects going into each row of <tt>serialized_sparse</tt> will have
-- rank `R-1`.
--
-- The minibatch size <tt>N</tt> is extracted from `sparse_shape[0]`.
serializeManySparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
serializeManySparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
-- | Serialize a <tt>SparseTensor</tt> into a string 3-vector (1-D
-- <a>Tensor</a>) object.
serializeSparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
serializeSparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
-- | Number of unique elements along last dimension of input <tt>set</tt>.
--
-- Input <tt>set</tt> is a <tt>SparseTensor</tt> represented by
-- <tt>set_indices</tt>, <tt>set_values</tt>, and <tt>set_shape</tt>. The
-- last dimension contains values in a set; duplicates are allowed but
-- ignored.
--
-- If <tt>validate_indices</tt> is <a>True</a>, this op validates the
-- order and range of <tt>set</tt> indices.
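--
-- For example, with hypothetical values:
--
-- ```prettyprint
-- # <tt>set</tt> has dense shape [2, 3]; row 0 holds {1, 1, 2}, row 1 holds {3}:
-- # set_indices = [[0, 0], [0, 1], [0, 2], [1, 0]]
-- # set_values  = [1, 1, 2, 3]
-- # set_shape   = [2, 3]
-- set_size(set_indices, set_values, set_shape) ==&gt; [2, 1]
-- # (duplicates are counted once)
-- ```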
setSize :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32
setSize' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32
-- | Returns the shape of a tensor.
--
-- This operation returns a 1-D integer tensor representing the shape of
-- <tt>input</tt>.
--
-- For example:
--
-- ```prettyprint # <tt>t</tt> is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3],
-- [4, 4, 4]]] shape(t) ==&gt; [2, 2, 3] ```
shape :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type
shape' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type
-- | Returns shape of tensors.
--
-- This operation returns N 1-D integer tensors representing the shape
-- of each `input[i]`.
shapeN :: (TensorType t, OneOf '[Int32, Int64] out_type) => [Tensor v'1 t] -> [Tensor Build out_type]
shapeN' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> [Tensor v'1 t] -> [Tensor Build out_type]
-- | Generate a sharded filename. The filename is printf formatted as
--
-- %s-%05d-of-%05d, basename, shard, num_shards.
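--
-- For example, basename <tt>output</tt>, shard 3, and num_shards 100
-- produce <tt>output-00003-of-00100</tt>.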
shardedFilename :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString
shardedFilename' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString
-- | Generate a glob pattern matching all sharded file names.
shardedFilespec :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
shardedFilespec' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
-- | Computes sigmoid of <tt>x</tt> element-wise.
--
-- Specifically, `y = 1 / (1 + exp(-x))`.
sigmoid :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
sigmoid' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient of the sigmoid of <tt>x</tt> wrt its input.
--
-- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
-- <tt>dy</tt> is the corresponding input gradient.
sigmoidGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
sigmoidGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns an element-wise indication of the sign of a number.
--
-- `y = sign(x) = -1` if `x &lt; 0`; 0 if `x == 0`; 1 if `x &gt; 0`.
--
-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y
-- = 0`.
sign :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
sign' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes sin of x element-wise.
sin :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
sin' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns the size of a tensor.
--
-- This operation returns an integer representing the number of elements
-- in <tt>input</tt>.
--
-- For example:
--
-- ```prettyprint
-- # <tt>t</tt> is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-- size(t) ==&gt; 12
-- ```
size :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type
size' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type
-- | Parses a text file and creates a batch of examples.
skipgram :: (MonadBuild m') => Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32))
skipgram' :: (MonadBuild m') => OpParams -> Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32))
-- | Return a slice from <tt>input</tt>.
--
-- The output tensor is a tensor with dimensions described by <a>size</a>
-- whose values are extracted from <tt>input</tt> starting at the offsets
-- in <tt>begin</tt>.
--
-- Requirements: `0 &lt;= begin[i] &lt;= begin[i] + size[i] &lt;= Di`
-- for `i` in `[0, n)`.
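--
-- For example, a sketch in the TF 1.x Python style used elsewhere in
-- these docs (the values are hypothetical):
--
-- ```prettyprint
-- t = tf.constant([[1, 2, 3], [4, 5, 6]])
-- tf.slice(t, [0, 1], [2, 2])
-- # ==&gt; [[2, 3],
-- #      [5, 6]]
-- ```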
slice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t
slice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t
-- | Computes softmax activations.
--
-- For each batch <tt>i</tt> and class <tt>j</tt> we have
--
-- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
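--
-- For example, with hypothetical logits:
--
-- ```prettyprint
-- logits = [[1.0, 2.0, 3.0]]
-- softmax(logits) ==&gt; [[0.09003057, 0.24472847, 0.66524096]]
-- ```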
softmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
softmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes softmax cross entropy cost and gradients to backpropagate.
--
-- Inputs are the logits, not probabilities.
softmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
softmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
-- | Computes softplus: `log(exp(features) + 1)`.
softplus :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
softplus' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes softplus gradients for a softplus operation.
softplusGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
softplusGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes softsign: `features / (abs(features) + 1)`.
softsign :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
softsign' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes softsign gradients for a softsign operation.
softsignGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
softsignGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | SpaceToBatch for 4-D tensors of type T.
--
-- This is a legacy version of the more general SpaceToBatchND.
--
-- Zero-pads and then rearranges (permutes) blocks of spatial data into
-- batch. More specifically, this op outputs a copy of the input tensor
-- where values from the <tt>height</tt> and <tt>width</tt> dimensions
-- are moved to the <tt>batch</tt> dimension. After the zero-padding,
-- both <tt>height</tt> and <tt>width</tt> of the input must be divisible
-- by the block size.
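--
-- For example, a sketch with hypothetical values, in the TF 1.x Python
-- style used elsewhere in these docs:
--
-- ```prettyprint
-- # input of shape [1, 2, 2, 1], block_size = 2, no padding:
-- x = [[[[1], [2]], [[3], [4]]]]
-- space_to_batch(x, paddings=[[0, 0], [0, 0]], block_size=2)
-- # ==&gt; shape [4, 1, 1, 1]:
-- # [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-- ```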
spaceToBatch :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
spaceToBatch' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
-- | SpaceToBatch for N-D tensors of type T.
--
-- This operation divides "spatial" dimensions `[1, ..., M]` of the input
-- into a grid of blocks of shape <tt>block_shape</tt>, and interleaves
-- these blocks with the "batch" dimension (0) such that in the output,
-- the spatial dimensions `[1, ..., M]` correspond to the position within
-- the grid, and the batch dimension combines both the position within a
-- spatial block and the original batch position. Prior to division into
-- blocks, the spatial dimensions of the input are optionally zero padded
-- according to <tt>paddings</tt>. See below for a precise description.
spaceToBatchND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t
spaceToBatchND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t
-- | SpaceToDepth for tensors of type T.
--
-- Rearranges blocks of spatial data, into depth. More specifically, this
-- op outputs a copy of the input tensor where values from the
-- <tt>height</tt> and <tt>width</tt> dimensions are moved to the
-- <tt>depth</tt> dimension. The attr <tt>block_size</tt> indicates the
-- input block size and how the data is moved.
--
-- <ul>
-- <li>Non-overlapping blocks of size `block_size x block_size` are
-- rearranged into depth at each location.</li>
-- <li>The depth of the output tensor is `input_depth * block_size *
-- block_size`.</li>
-- <li>The input tensor's height and width must be divisible by
-- block_size.</li>
-- </ul>
--
-- That is, assuming the input is in the shape: `[batch, height, width,
-- depth]`, the shape of the output will be: `[batch, height/block_size,
-- width/block_size, depth*block_size*block_size]`
--
-- This operation requires that the input tensor be of rank 4, and that
-- <tt>block_size</tt> be &gt;=1 and a divisor of both the input
-- <tt>height</tt> and <tt>width</tt>.
--
-- This operation is useful for resizing the activations between
-- convolutions (but keeping all data), e.g. instead of pooling. It is
-- also useful for training purely convolutional models.
--
-- For example, given this input of shape `[1, 2, 2, 1]`, and block_size
-- of 2:
--
-- ```prettyprint x = [[[[1], [2]], [[3], [4]]]] ```
--
-- This operation will output a tensor of shape `[1, 1, 1, 4]`:
--
-- ```prettyprint [[[[1, 2, 3, 4]]]] ```
--
-- Here, the input has a batch of 1 and each batch element has shape `[2,
-- 2, 1]`, the corresponding output will have a single element (i.e.
-- width and height are both 1) and will have a depth of 4 channels (1 *
-- block_size * block_size). The output element shape is `[1, 1, 4]`.
--
-- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`,
-- e.g.
--
-- ```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11,
-- 12]]]] ```
--
-- This operation, for block_size of 2, will return the following tensor
-- of shape `[1, 1, 1, 12]`
--
-- ```prettyprint [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ```
--
-- Similarly, for the following input of shape `[1 4 4 1]`, and a block
-- size of 2:
--
-- ```prettyprint x = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]], [[9],
-- [10], [13], [14]], [[11], [12], [15], [16]]]] ```
--
-- the operator will return the following tensor of shape `[1 2 2 4]`:
--
-- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
-- [13, 14, 15, 16]]]] ```
spaceToDepth :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t
spaceToDepth' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t
-- | Applies a sparse gradient to a given accumulator. Does not add if
-- local_step is
--
-- less than the accumulator's global_step.
sparseAccumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode)
sparseAccumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode)
-- | Extracts the average sparse gradient in the given
-- SparseConditionalAccumulator,
--
-- provided that sufficient (i.e., more than num_required) gradients
-- have been accumulated. The op blocks until sufficient gradients have
-- been accumulated. If the accumulator has already aggregated more than
-- num_required gradients, it returns the average of the accumulated
-- gradients. Also automatically increments the recorded global_step in
-- the accumulator by 1, and resets the aggregate to 0.
sparseAccumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64))
sparseAccumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64))
-- | Adds two <tt>SparseTensor</tt> objects to produce another
-- <tt>SparseTensor</tt>.
--
-- The input <tt>SparseTensor</tt> objects' indices are assumed ordered
-- in standard lexicographic order. If this is not the case, before this
-- step run <tt>SparseReorder</tt> to restore index ordering.
--
-- By default, if two values sum to zero at some index, the output
-- <tt>SparseTensor</tt> would still include that particular location in
-- its index, storing a zero in the corresponding value slot. To override
-- this, callers can specify <tt>thresh</tt>, indicating that if the sum
-- has a magnitude strictly smaller than <tt>thresh</tt>, its
-- corresponding value and index would then not be included. In
-- particular, `thresh == 0` (default) means everything is kept and
-- actual thresholding happens only for a positive value.
--
-- In the following shapes, <tt>nnz</tt> is the count after taking
-- <tt>thresh</tt> into account.
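--
-- For example, with hypothetical 1-D inputs of dense shape `[3]`:
--
-- ```prettyprint
-- # A: indices [[0]],      values [1.0]
-- # B: indices [[0], [2]], values [-1.0, 2.0]
-- # thresh = 0 keeps the exact-zero sum at index 0:
-- #   output indices ==&gt; [[0], [2]], values ==&gt; [0.0, 2.0]
-- # thresh = 0.1 drops it:
-- #   output indices ==&gt; [[2]], values ==&gt; [2.0]
-- ```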
sparseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
sparseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
-- | The gradient operator for the SparseAdd op.
--
-- The SparseAdd op calculates A + B, where A, B, and the sum are all
-- represented as <tt>SparseTensor</tt> objects. This op takes in the
-- upstream gradient w.r.t. non-empty values of the sum, and outputs the
-- gradients w.r.t. the non-empty values of A and B.
sparseAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t)
sparseAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t)
-- | var: Should be from a Variable().
sparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t)
sparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t)
-- | Update relevant entries in '*var' and '*accum' according to the
-- adagrad scheme.
--
-- That is, for rows we have grad for, we update var and accum as
-- follows:
-- accum += grad * grad
-- var -= lr * grad * (1 / sqrt(accum))
sparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t)
sparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t)
-- | Update entries in '*var' and '*accum' according to the proximal
-- adagrad scheme.
sparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t)
sparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t)
-- | Update '*var' according to the centered RMSProp algorithm.
--
-- The centered RMSProp algorithm uses an estimate of the centered second
-- moment (i.e., the variance) for normalization, as opposed to regular
-- RMSProp, which uses the (uncentered) second moment. This often helps
-- with training, but is slightly more expensive in terms of computation
-- and memory.
--
-- Note that in the dense implementation of this algorithm, mg, ms, and
-- mom will update even if the grad is zero, but in this sparse
-- implementation, mg, ms, and mom will not update in iterations during
-- which the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- mean_grad = decay * mean_grad + (1-decay) * gradient
-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
--
-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad
-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-- var &lt;- var - mom
sparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t)
sparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t)
-- | Update relevant entries in '*var' according to the Ftrl-proximal
-- scheme.
--
-- That is, for rows we have grad for, we update var, accum and linear
-- as follows:
-- accum_new = accum + grad * grad
-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-- var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0
-- accum = accum_new
sparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
sparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
-- | Update relevant entries in '*var' and '*accum' according to the
-- momentum scheme.
--
-- Set use_nesterov = True if you want to use Nesterov momentum.
--
-- That is, for rows we have grad for, we update var and accum as
-- follows:
--
-- accum = accum * momentum + grad
-- var -= lr * accum
sparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t)
sparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t)
-- | Sparse update entries in '*var' and '*accum' according to FOBOS
-- algorithm.
--
-- That is, for rows we have grad for, we update var and accum as
-- follows:
-- accum += grad * grad
-- prox_v = var
-- prox_v -= lr * grad * (1 / sqrt(accum))
-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
sparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t)
sparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t)
-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
--
-- That is, for rows we have grad for, we update var as follows:
-- prox_v = var - alpha * grad
-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
sparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t)
sparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t)
-- | Update '*var' according to the RMSProp algorithm.
--
-- Note that in the dense implementation of this algorithm, ms and mom
-- will update even if the grad is zero, but in this sparse
-- implementation, ms and mom will not update in iterations during which
-- the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
--
-- ms &lt;- rho * ms_{t-1} + (1-rho) * grad * grad
-- mom &lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-- var &lt;- var - mom
sparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t)
sparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t)
-- | Concatenates a list of <tt>SparseTensor</tt> along the specified
-- dimension.
--
-- Concatenation is with respect to the dense versions of these sparse
-- tensors. It is assumed that each input is a <tt>SparseTensor</tt>
-- whose elements are ordered along increasing dimension number.
--
-- All inputs' shapes must match, except for the concat dimension. The
-- <tt>indices</tt>, <tt>values</tt>, and <tt>shapes</tt> lists must have
-- the same length.
--
-- The output shape is identical to the inputs', except along the concat
-- dimension, where it is the sum of the inputs' sizes along that
-- dimension.
--
-- The output elements will be resorted to preserve the sort order along
-- increasing dimension number.
--
-- This op runs in `O(M log M)` time, where <tt>M</tt> is the total
-- number of non-empty values across all inputs. This is due to the need
-- for an internal sort in order to concatenate efficiently across an
-- arbitrary dimension.
--
-- For example, if `concat_dim = 1` and the inputs are
--
-- sp_inputs[0]: shape = [2, 3]
-- [0, 2]: "a"
-- [1, 0]: "b"
-- [1, 1]: "c"
--
-- sp_inputs[1]: shape = [2, 4]
-- [0, 1]: "d"
-- [0, 2]: "e"
--
-- then the output will be
--
-- shape = [2, 7]
-- [0, 2]: "a"
-- [0, 4]: "d"
-- [0, 5]: "e"
-- [1, 0]: "b"
-- [1, 1]: "c"
--
-- Graphically this is equivalent to doing
--
-- [    a] concat [  d e  ] = [    a   d e  ]
-- [b c  ]        [       ]   [b c          ]
sparseConcat :: (TensorType t) => Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
sparseConcat' :: (TensorType t) => OpParams -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
-- | A conditional accumulator for aggregating sparse gradients. The
-- accumulator
--
-- accepts gradients marked with local_step greater or equal to the most
-- recent global_step known to the accumulator. The average can be
-- extracted from the accumulator, provided sufficient gradients have
-- been accumulated. Extracting the average automatically resets the
-- aggregate to 0, and increments the global_step recorded by the
-- accumulator.
sparseConditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString)
sparseConditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString)
-- | Adds up a SparseTensor and a dense Tensor, using these special rules:
--
-- <ol>
-- <li>Broadcasts the dense side to have the same shape as the sparse
-- side, if eligible;</li>
-- <li>Then, only the dense values pointed to by the indices of the
-- SparseTensor participate in the cwise addition.</li>
-- </ol>
--
-- By these rules, the result is a logical SparseTensor with exactly the
-- same indices and shape, but possibly with different non-zero values.
-- The output of this Op is the resultant non-zero values.
sparseDenseCwiseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
sparseDenseCwiseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
-- | Component-wise divides a SparseTensor by a dense Tensor.
--
-- <ul>
-- <li>Limitation*: this Op only broadcasts the dense side to the sparse
-- side, but not the other direction.</li>
-- </ul>
sparseDenseCwiseDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
sparseDenseCwiseDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
-- | Component-wise multiplies a SparseTensor by a dense Tensor.
--
-- The output locations corresponding to the implicitly zero elements in
-- the sparse tensor will be zero (i.e., will not take up storage space),
-- regardless of the contents of the dense tensor (even if it's +/-INF
-- and even though INF*0 == NaN).
--
-- <ul>
-- <li>Limitation*: this Op only broadcasts the dense side to the sparse
-- side, but not the other direction.</li>
-- </ul>
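--
-- For example, with hypothetical values:
--
-- ```prettyprint
-- # sp: indices [[0, 0], [1, 1]], values [1.0, 2.0], shape [2, 2]
-- # dense: [[10.0, 20.0],
-- #         [30.0, 40.0]]
-- # output values ==&gt; [10.0, 80.0]   (only at the sparse locations)
-- ```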
sparseDenseCwiseMul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
sparseDenseCwiseMul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
-- | Multiply matrix "a" by matrix "b".
--
-- The inputs must be two-dimensional matrices and the inner dimension of
-- "a" must match the outer dimension of "b". This op is optimized for
-- the case where at least one of "a" or "b" is sparse. The breakeven for
-- using this versus a dense matrix multiply on one platform was 30% zero
-- values in the sparse matrix.
sparseMatMul :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float
sparseMatMul' :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => OpParams -> Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float
-- | Computes the sum of elements across dimensions of a SparseTensor.
--
-- This Op takes a SparseTensor and is the sparse counterpart to
-- `tf.reduce_sum()`. In particular, this Op also returns a dense
-- <a>Tensor</a> instead of a sparse one.
--
-- Reduces <tt>sp_input</tt> along the dimensions given in
-- <tt>reduction_axes</tt>. Unless <tt>keep_dims</tt> is true, the rank
-- of the tensor is reduced by 1 for each entry in
-- <tt>reduction_axes</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
--
-- If <tt>reduction_axes</tt> has no entries, all dimensions are reduced,
-- and a tensor with a single element is returned. Additionally, the axes
-- can be negative, which are interpreted according to the indexing rules
-- in Python.
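--
-- For example, a sketch in the TF 1.x Python style used elsewhere in
-- these docs (the values are hypothetical):
--
-- ```prettyprint
-- # <tt>x</tt> represents [[1, ?, 1],
-- #                        [?, 1, ?]]  where ? is implicitly zero.
-- tf.sparse_reduce_sum(x)     # ==&gt; 3
-- tf.sparse_reduce_sum(x, 1)  # ==&gt; [2, 1]
-- ```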
sparseReduceSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t
sparseReduceSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t
-- | Computes the sum of elements across dimensions of a SparseTensor.
--
-- This Op takes a SparseTensor and is the sparse counterpart to
-- `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
-- SparseTensor.
--
-- Reduces <tt>sp_input</tt> along the dimensions given in
-- <tt>reduction_axes</tt>. Unless <tt>keep_dims</tt> is true, the rank
-- of the tensor is reduced by 1 for each entry in
-- <tt>reduction_axes</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
--
-- If <tt>reduction_axes</tt> has no entries, all dimensions are reduced,
-- and a tensor with a single element is returned. Additionally, the axes
-- can be negative, which are interpreted according to the indexing rules
-- in Python.
sparseReduceSumSparse :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
sparseReduceSumSparse' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
-- | Reorders a SparseTensor into the canonical, row-major ordering.
--
-- Note that by convention, all sparse ops preserve the canonical
-- ordering along increasing dimension number. The only time ordering can
-- be violated is during manual manipulation of the indices and values
-- vectors to add entries.
--
-- Reordering does not affect the shape of the SparseTensor.
--
-- If the tensor has rank <tt>R</tt> and <tt>N</tt> non-empty values,
-- <tt>input_indices</tt> has shape `[N, R]`, input_values has length
-- <tt>N</tt>, and input_shape has length <tt>R</tt>.
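--
-- For example, with hypothetical values:
--
-- ```prettyprint
-- # input:  indices [[0, 3], [0, 1]], values ["b", "a"]
-- # output: indices [[0, 1], [0, 3]], values ["a", "b"]
-- ```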
sparseReorder :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t)
sparseReorder' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t)
-- | Reshapes a SparseTensor to represent values in a new dense shape.
--
-- This operation has the same semantics as reshape on the represented
-- dense tensor. The <tt>input_indices</tt> are recomputed based on the
-- requested <tt>new_shape</tt>.
--
-- If one component of <tt>new_shape</tt> is the special value -1, the
-- size of that dimension is computed so that the total dense size
-- remains constant. At most one component of <tt>new_shape</tt> can be
-- -1. The number of dense elements implied by <tt>new_shape</tt> must be
-- the same as the number of dense elements originally implied by
-- <tt>input_shape</tt>.
--
-- Reshaping does not affect the order of values in the SparseTensor.
--
-- If the input tensor has rank <tt>R_in</tt> and <tt>N</tt> non-empty
-- values, and <tt>new_shape</tt> has length <tt>R_out</tt>, then
-- <tt>input_indices</tt> has shape `[N, R_in]`, <tt>input_shape</tt> has
-- length <tt>R_in</tt>, <tt>output_indices</tt> has shape `[N, R_out]`,
-- and <tt>output_shape</tt> has length <tt>R_out</tt>.
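--
-- For example, with hypothetical values:
--
-- ```prettyprint
-- # input_shape = [2, 3], input_indices = [[0, 2], [1, 1]]
-- # (linear positions 2 and 4), new_shape = [3, 2]
-- # output_shape   ==&gt; [3, 2]
-- # output_indices ==&gt; [[1, 0], [2, 0]]
-- ```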
sparseReshape :: Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64)
sparseReshape' :: OpParams -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64)
-- | Computes the mean along sparse segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Like <tt>SegmentMean</tt>, but <tt>segment_ids</tt> can have rank less
-- than `data`'s first dimension, selecting a subset of dimension 0,
-- specified by <tt>indices</tt>.
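--
-- For example, a sketch in the TF 1.x Python style used elsewhere in
-- these docs (the values are hypothetical):
--
-- ```prettyprint
-- c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
-- # Average rows 0 and 2 into a single segment:
-- tf.sparse_segment_mean(c, tf.constant([0, 2]), tf.constant([0, 0]))
-- # ==&gt; [[3.0, 4.0]]
-- ```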
sparseSegmentMean :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
sparseSegmentMean' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
-- | Computes gradients for SparseSegmentMean.
--
-- Returns tensor "output" with same shape as grad, except for dimension
-- 0 whose value is output_dim0.
sparseSegmentMeanGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
sparseSegmentMeanGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
-- | Computes the sum along sparse segments of a tensor divided by the sqrt
-- of N.
--
-- N is the size of the segment being reduced.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
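--
-- For example, a sketch in the TF 1.x Python style used elsewhere in
-- these docs (the values are hypothetical):
--
-- ```prettyprint
-- c = tf.constant([[1.0, 2.0], [5.0, 6.0]])
-- tf.sparse_segment_sqrt_n(c, tf.constant([0, 1]), tf.constant([0, 0]))
-- # segment sum [6.0, 8.0] divided by sqrt(N) with N = 2:
-- # ==&gt; [[4.2426..., 5.6568...]]
-- ```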
sparseSegmentSqrtN :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
sparseSegmentSqrtN' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
-- | Computes gradients for SparseSegmentSqrtN.
--
-- Returns tensor "output" with same shape as grad, except for dimension
-- 0 whose value is output_dim0.
sparseSegmentSqrtNGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
sparseSegmentSqrtNGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
-- | Computes the sum along sparse segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Like <tt>SegmentSum</tt>, but <tt>segment_ids</tt> can have rank less
-- than `data`'s first dimension, selecting a subset of dimension 0,
-- specified by <tt>indices</tt>.
--
-- For example:
--
-- ```prettyprint
-- c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
--
-- # Select two rows, one segment.
-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
-- # ==&gt; [[0 0 0 0]]
--
-- # Select two rows, two segments.
-- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
-- # ==&gt; [[ 1  2  3  4]
-- #      [-1 -2 -3 -4]]
--
-- # Select all rows, two segments.
-- tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
-- # ==&gt; [[0 0 0 0]
-- #      [5 6 7 8]]
--
-- # Which is equivalent to:
-- tf.segment_sum(c, tf.constant([0, 0, 1]))
-- ```
sparseSegmentSum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
sparseSegmentSum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
-- | Applies softmax to a batched N-D <tt>SparseTensor</tt>.
--
-- The inputs represent an N-D SparseTensor with logical shape `[..., B,
-- C]` (where `N &gt;= 2`), and with indices sorted in the canonical
-- lexicographic order.
--
-- This op is equivalent to applying the normal `tf.nn.softmax()` to each
-- innermost logical submatrix with shape `[B, C]`, but with the catch
-- that *the implicitly zero elements do not participate*. Specifically,
-- the algorithm is equivalent to the following:
--
-- <ol>
-- <li>Applies `tf.nn.softmax()` to a densified view of each innermost
-- submatrix with shape `[B, C]`, along the size-C dimension;</li>
-- <li>Masks out the original implicitly-zero locations;</li>
-- <li>Renormalizes the remaining elements.</li>
-- </ol>
--
-- Hence, the <tt>SparseTensor</tt> result has exactly the same non-zero
-- indices and shape.
sparseSoftmax :: (OneOf '[Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t
sparseSoftmax' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t
-- | Computes softmax cross entropy cost and gradients to backpropagate.
--
-- Unlike <tt>SoftmaxCrossEntropyWithLogits</tt>, this operation does not
-- accept a matrix of label probabilities, but rather a single label per
-- row of features. This label is considered to have probability 1.0 for
-- the given row.
--
-- Inputs are the logits, not probabilities.
sparseSoftmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t)
sparseSoftmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => OpParams -> Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t)
-- | Returns the element-wise max of two SparseTensors.
--
-- Assumes the two SparseTensors have the same shape, i.e., no
-- broadcasting.
sparseSparseMaximum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
sparseSparseMaximum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
-- | Returns the element-wise min of two SparseTensors.
--
-- Assumes the two SparseTensors have the same shape, i.e., no
-- broadcasting.
sparseSparseMinimum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
sparseSparseMinimum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
-- | Split a <tt>SparseTensor</tt> into <tt>num_split</tt> tensors along
-- one dimension.
--
-- If `shape[split_dim]` is not an integer multiple of
-- <tt>num_split</tt>, slices `[0 : shape[split_dim] % num_split]` get
-- one extra dimension. For example, if `split_dim = 1` and `num_split =
-- 2` and the input is
--
-- input_tensor = shape = [2, 7]
-- [    a   d e  ]
-- [b c          ]
--
-- Graphically the output tensors are:
--
-- output_tensor[0] = shape = [2, 4]
-- [    a ]
-- [b c   ]
--
-- output_tensor[1] = shape = [2, 3]
-- [ d e  ]
-- [      ]
sparseSplit :: (TensorType t) => Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])
sparseSplit' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])
-- | Adds up a <tt>SparseTensor</tt> and a dense <a>Tensor</a>, producing a
-- dense <a>Tensor</a>.
--
-- This Op does not require <tt>a_indices</tt> be sorted in standard
-- lexicographic order.
sparseTensorDenseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t
sparseTensorDenseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t
-- | Multiply SparseTensor (of rank 2) <a>A</a> by dense matrix <a>B</a>.
--
-- No validity checking is performed on the indices of A. However, the
-- following input format is recommended for optimal behavior:
--
-- if adjoint_a == false: A should be sorted in lexicographically
-- increasing order. Use SparseReorder if you're not sure. if adjoint_a
-- == true: A should be sorted in order of increasing dimension 1 (i.e.,
-- "column major" order instead of "row major" order).
sparseTensorDenseMatMul :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
sparseTensorDenseMatMul' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
-- | Converts a sparse representation into a dense tensor.
--
-- Builds an array <tt>dense</tt> with shape <tt>output_shape</tt> such
-- that
--
-- ```prettyprint # If sparse_indices is scalar dense[i] = (i ==
-- sparse_indices ? sparse_values : default_value)
--
-- # If sparse_indices is a vector, then for each i
-- dense[sparse_indices[i]] = sparse_values[i]
--
-- # If sparse_indices is an n by d matrix, then for each i in [0, n)
-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] =
-- sparse_values[i] ```
--
-- All other values in <tt>dense</tt> are set to <tt>default_value</tt>.
-- If <tt>sparse_values</tt> is a scalar, all sparse indices are set to
-- this single value.
--
-- Indices should be sorted in lexicographic order, and indices must not
-- contain any repeats. If <tt>validate_indices</tt> is true, these
-- properties are checked during execution.
sparseToDense :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
sparseToDense' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
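-- A small sketch of the vector case, under the same companion-package
-- assumptions (<tt>TensorFlow.Core</tt> for the session,
-- <tt>TensorFlow.Ops</tt> for <tt>constant</tt>):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   dense <- TF.runSession $ do
--     let indices  = TFO.constant (TF.Shape [2]) [0, 2 :: Int32]  -- sparse_indices
--         outShape = TFO.constant (TF.Shape [1]) [4 :: Int32]     -- output_shape
--         values   = TFO.constant (TF.Shape [2]) [5, 7 :: Float]  -- sparse_values
--         deflt    = TFO.constant (TF.Shape []) [0 :: Float]      -- default_value
--     TF.run (TFC.sparseToDense indices outShape values deflt)
--   print (dense :: V.Vector Float)  -- [5.0, 0.0, 7.0, 0.0]
-- ```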
-- | Applies set operation along last dimension of 2 <tt>SparseTensor</tt>
-- inputs.
--
-- See SetOperationOp::SetOperationFromContext for values of
-- <tt>set_operation</tt>.
--
-- If <tt>validate_indices</tt> is <a>True</a>,
-- <tt>SparseToSparseSetOperation</tt> validates the order and range of
-- <tt>set1</tt> and <tt>set2</tt> indices.
--
-- Input <tt>set1</tt> is a <tt>SparseTensor</tt> represented by
-- <tt>set1_indices</tt>, <tt>set1_values</tt>, and <tt>set1_shape</tt>.
-- For <tt>set1</tt> ranked <tt>n</tt>, 1st `n-1` dimensions must be the
-- same as <tt>set2</tt>. Dimension <tt>n</tt> contains values in a set,
-- duplicates are allowed but ignored.
--
-- Input <tt>set2</tt> is a <tt>SparseTensor</tt> represented by
-- <tt>set2_indices</tt>, <tt>set2_values</tt>, and <tt>set2_shape</tt>.
-- For <tt>set2</tt> ranked <tt>n</tt>, 1st `n-1` dimensions must be the
-- same as <tt>set1</tt>. Dimension <tt>n</tt> contains values in a set,
-- duplicates are allowed but ignored.
--
-- If <tt>validate_indices</tt> is <a>True</a>, this op validates the
-- order and range of <tt>set1</tt> and <tt>set2</tt> indices.
--
-- Output <tt>result</tt> is a <tt>SparseTensor</tt> represented by
-- <tt>result_indices</tt>, <tt>result_values</tt>, and
-- <tt>result_shape</tt>. For <tt>set1</tt> and <tt>set2</tt> ranked
-- <tt>n</tt>, this has rank <tt>n</tt> and the same 1st `n-1` dimensions
-- as <tt>set1</tt> and <tt>set2</tt>. The <tt>nth</tt> dimension
-- contains the result of <tt>set_operation</tt> applied to the
-- corresponding `[0...n-1]` dimension of <tt>set</tt>.
sparseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
sparseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
-- | Splits a tensor into <tt>num_split</tt> tensors along one dimension.
split :: (TensorType t) => Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t]
split' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t]
-- | Splits a tensor into <tt>num_split</tt> tensors along one dimension.
splitV :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t]
splitV' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t]
-- | Computes square root of x element-wise.
--
-- I.e., \(y = \sqrt{x} = x^{1/2}\).
sqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
sqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient for the sqrt of <tt>x</tt> wrt its input.
--
-- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and
-- <tt>dy</tt> is the corresponding input gradient.
sqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
sqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes square of x element-wise.
--
-- I.e., \(y = x * x = x^2\).
square :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
square' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns (x - y)(x - y) element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>SquaredDifference</tt> supports broadcasting. More
-- about broadcasting <a>here</a></li>
-- </ul>
squaredDifference :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
squaredDifference' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Removes dimensions of size 1 from the shape of a tensor.
--
-- Given a tensor <tt>input</tt>, this operation returns a tensor of the
-- same type with all dimensions of size 1 removed. If you don't want to
-- remove all size 1 dimensions, you can remove specific size 1
-- dimensions by specifying <tt>squeeze_dims</tt>.
--
-- For example:
--
-- ```prettyprint # <tt>t</tt> is a tensor of shape [1, 2, 1, 3, 1, 1]
-- shape(squeeze(t)) ==&gt; [2, 3] ```
--
-- Or, to remove specific size 1 dimensions:
--
-- ```prettyprint # <tt>t</tt> is a tensor of shape [1, 2, 1, 3, 1, 1]
-- shape(squeeze(t, [2, 4])) ==&gt; [1, 2, 3, 1] ```
squeeze :: (TensorType t) => Tensor v'1 t -> Tensor Build t
squeeze' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | A stack that produces elements in first-in last-out order.
stack :: (MonadBuild m') => DataType -> m' (Tensor Ref ByteString)
stack' :: (MonadBuild m') => OpParams -> DataType -> m' (Tensor Ref ByteString)
-- | Delete the stack from its resource container.
stackClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode)
stackClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode)
-- | Pop the element at the top of the stack.
stackPop :: (MonadBuild m', TensorType elem_type) => Tensor Ref ByteString -> m' (Tensor Value elem_type)
stackPop' :: (MonadBuild m', TensorType elem_type) => OpParams -> Tensor Ref ByteString -> m' (Tensor Value elem_type)
-- | Push an element onto the stack.
stackPush :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t)
stackPush' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t)
-- | Stage values similar to a lightweight Enqueue. The basic functionality
-- of this
--
-- Op is similar to a queue with many fewer capabilities and options.
-- This Op is optimized for performance.
stage :: (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -> m' (ControlNode)
stage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -> m' (ControlNode)
-- | Stops gradient computation.
--
-- When executed in a graph, this op outputs its input tensor as-is.
--
-- When building ops to compute gradients, this op prevents the
-- contribution of its inputs to be taken into account. Normally, the
-- gradient generator adds ops to a graph to compute the derivatives of a
-- specified <tt>loss</tt> by recursively finding out inputs that
-- contributed to its computation. If you insert this op in the graph,
-- its inputs are masked from the gradient generator. They are not taken into
-- account for computing gradients.
--
-- This is useful any time you want to compute a value with TensorFlow
-- but need to pretend that the value was a constant. Some examples
-- include:
--
-- <ul>
-- <li>The *EM* algorithm where the *M-step* should not involve
-- backpropagation through the output of the *E-step*.</li>
-- <li>Contrastive divergence training of Boltzmann machines where, when
-- differentiating the energy function, the training must not
-- backpropagate through the graph that generated the samples from the
-- model.</li>
-- <li>Adversarial training, where no backprop should happen through the
-- adversarial example generation process.</li>
-- </ul>
stopGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t
stopGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Return a strided slice from <tt>input</tt>.
--
-- Note, most Python users will want to use the Python
-- <a>__getitem__</a> rather than this op directly.
--
-- The goal of this op is to produce a new tensor with a subset of the
-- elements from the <tt>n</tt> dimensional <tt>input</tt> tensor. The
-- subset is chosen using a sequence of <tt>m</tt> sparse range
-- specifications encoded into the arguments of this function. Note, in
-- some cases <tt>m</tt> could be equal to <tt>n</tt>, but this need not
-- be the case. Each range specification entry can be one of the
-- following:
--
-- <ul>
-- <li>An ellipsis (...). Ellipses are used to imply zero or more
-- dimensions of full-dimension selection and are produced using
-- <tt>ellipsis_mask</tt>. For example, `foo[...]` is the identity
-- slice.</li>
-- <li>A new axis. This is used to insert a new shape=1 dimension and is
-- produced using <tt>new_axis_mask</tt>. For example, `foo[:, ...]`
-- where <tt>foo</tt> is shape `(3, 4)` produces a `(1, 3, 4)`
-- tensor.</li>
-- <li>A range `begin:end:stride`. This is used to specify how much to
-- choose from a given dimension. <tt>stride</tt> can be any integer but
-- 0. <tt>begin</tt> is an integer which represents the index of the
-- first value to select while <tt>end</tt> represents the index of the
-- last value to select. The number of values selected in each dimension
-- is `end - begin` if `stride &gt; 0` and `begin - end` if `stride &lt;
-- 0`. <tt>begin</tt> and <tt>end</tt> can be negative where `-1` is the
-- last element, `-2` is the second to last. <tt>begin_mask</tt> controls
-- whether to replace the explicitly given <tt>begin</tt> with an
-- implicit effective value of `0` if `stride &gt; 0` and `-1` if `stride
-- &lt; 0`. <tt>end_mask</tt> is analogous but produces the number
-- required to create the largest open interval. For example, given a
-- shape `(3,)` tensor `foo[:]`, the effective <tt>begin</tt> and
-- <tt>end</tt> are `0` and `3`. Do not assume this is equivalent to
-- `foo[0:-1]` which has an effective <tt>begin</tt> and <tt>end</tt> of
-- `0` and `2`. Another example is `foo[-2::-1]` which reverses the first
-- dimension of a tensor while dropping its last element (in the original
-- order). For example, `foo = [1,2,3,4]; foo[-2::-1]` is
-- `[3,2,1]`.</li>
-- <li>A single index. This is used to keep only elements that have a
-- given index. For example, `foo[2, :]` on a shape `(5,6)` tensor
-- produces a shape `(6,)` tensor. This is encoded in <tt>begin</tt> and
-- <tt>end</tt> and <tt>shrink_axis_mask</tt>.</li>
-- </ul>
--
-- Each conceptual range specification is encoded in the op's argument.
-- This encoding is best understood by considering a non-trivial example.
-- In particular, `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
--
-- ```prettyprint begin = [1, 2, x, x, 0, x] # x denotes don't care
-- (usually 0) end = [2, 4, x, x, -3, x] strides = [1, 1, x, x, -1, 1]
-- begin_mask = 1&lt;&lt;4 | 1 &lt;&lt; 5 = 48 end_mask = 1&lt;&lt;5 = 32
-- ellipsis_mask = 1&lt;&lt;3 = 8 new_axis_mask = 1&lt;&lt;2 = 4
-- shrink_axis_mask = 1&lt;&lt;0 ```
--
-- In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
-- the slice becomes (2, 1, 5, 5, 2, 5). Let us walk step by step through
-- each argument specification.
--
-- <ol>
-- <li>The first argument in the example slice is turned into `begin = 1`
-- and `end = begin + 1 = 2`. To disambiguate from the original spec
-- `2:4` we also set the appropriate bit in
-- <tt>shrink_axis_mask</tt>.</li>
-- <li>`2:4` contributes 2, 4, 1 to begin, end, and stride. All masks
-- have zero bits contributed.</li>
-- <li>None is a synonym for `tf.newaxis`. This means insert a dimension
-- of size 1 in the final shape. Dummy values are contributed
-- to begin, end and stride, while the new_axis_mask bit is set.</li>
-- <li><tt>...</tt> grabs the full ranges from as many dimensions as
-- needed to fully specify a slice for every dimension of the input
-- shape.</li>
-- <li>`:-3:-1` shows the use of negative indices. A negative index
-- <tt>i</tt> associated with a dimension that has shape <tt>s</tt> is
-- converted to a positive index `s + i`. So `-1` becomes `s-1` (i.e. the
-- last element). This conversion is done internally so begin, end and
-- strides receive x, -3, and -1. The appropriate begin_mask bit is set
-- to indicate the start range is the full range (ignoring the x).</li>
-- <li><tt>:</tt> indicates that the entire contents of the corresponding
-- dimension is selected. This is equivalent to `::` or `0::1`. begin,
-- end, and strides receive 0, 0, and 1, respectively. The appropriate
-- bits in <tt>begin_mask</tt> and <tt>end_mask</tt> are also set.</li>
-- </ol>
--
-- <ul>
-- <li>Requirements*: `0 != strides[i] for i in [0, m)` `ellipsis_mask
-- must be a power of two (only one ellipsis)`</li>
-- </ul>
stridedSlice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t
stridedSlice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t
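-- To make the range encoding concrete, a sketch of a plain `1:4:2`
-- slice with all masks left at their defaults (companion
-- <tt>tensorflow</tt>/<tt>tensorflow-ops</tt> packages assumed):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   sliced <- TF.runSession $ do
--     let xs      = TFO.constant (TF.Shape [6]) [0, 1, 2, 3, 4, 5 :: Float]
--         begin   = TFO.constant (TF.Shape [1]) [1 :: Int32]
--         end     = TFO.constant (TF.Shape [1]) [4 :: Int32]
--         strides = TFO.constant (TF.Shape [1]) [2 :: Int32]
--     TF.run (TFC.stridedSlice xs begin end strides)
--   print (sliced :: V.Vector Float)  -- [1.0, 3.0], i.e. xs[1:4:2]
-- ```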
-- | Assign <a>value</a> to the sliced l-value reference of <tt>ref</tt>.
--
-- The values of <a>value</a> are assigned to the positions in the
-- variable <tt>ref</tt> that are selected by the slice parameters. The
-- slice parameters <tt>begin</tt>, <tt>end</tt>, <tt>strides</tt>, etc. work
-- exactly as in <tt>StridedSlice</tt>.
--
-- NOTE this op currently does not support broadcasting and so
-- <a>value</a>'s shape must be exactly the shape produced by the slice
-- of <tt>ref</tt>.
stridedSliceAssign :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t)
stridedSliceAssign' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t)
-- | Returns the gradient of <tt>StridedSlice</tt>.
--
-- Since <tt>StridedSlice</tt> cuts out pieces of its <tt>input</tt>
-- which is size <a>shape</a>, its gradient will have the same shape
-- (which is passed here as <a>shape</a>). The gradient will be zero in
-- any element that the slice does not select.
--
-- Arguments are the same as <tt>StridedSlice</tt> with the exception that
-- <tt>dy</tt> is the input gradient to be propagated and <a>shape</a> is
-- the shape of <tt>StridedSlice</tt>'s <tt>input</tt>.
stridedSliceGrad :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t
stridedSliceGrad' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t
-- | Joins the strings in the given list of string tensors into one tensor;
--
-- with the given separator (default is an empty separator).
stringJoin :: [Tensor v'1 ByteString] -> Tensor Build ByteString
stringJoin' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString
-- | Split elements of <tt>input</tt> based on <tt>delimiter</tt> into a
-- <tt>SparseTensor</tt>.
--
-- Let N be the size of source (typically N will be the batch size).
-- Split each element of <tt>input</tt> based on <tt>delimiter</tt> and
-- return a <tt>SparseTensor</tt> containing the split tokens. Empty
-- tokens are ignored.
--
-- <tt>delimiter</tt> can be empty, or a string of split characters. If
-- <tt>delimiter</tt> is an empty string, each element of <tt>input</tt>
-- is split into individual single-byte character strings, including
-- splitting of UTF-8 multibyte sequences. Otherwise every character of
-- <tt>delimiter</tt> is a potential split point.
--
-- For example: N = 2, input[0] is 'hello world' and input[1] is 'a b c',
-- then the output will be
--
-- indices = [0, 0; 0, 1; 1, 0; 1, 1; 1, 2] shape = [2, 3] values =
-- [<tt>hello</tt>, <tt>world</tt>, <tt>a</tt>, <tt>b</tt>, <tt>c</tt>]
stringSplit :: Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)
stringSplit' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)
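-- A sketch that fetches only the <tt>values</tt> output (companion
-- packages assumed; string constants need a <tt>ByteString</tt> element
-- type):
--
-- ```
-- {-# LANGUAGE OverloadedStrings #-}
-- import Data.ByteString (ByteString)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   toks <- TF.runSession $ do
--     let input = TFO.constant (TF.Shape [1]) ["hello world" :: ByteString]
--         delim = TFO.constant (TF.Shape []) [" " :: ByteString]
--         (_indices, values, _shape) = TFC.stringSplit input delim
--     TF.run values
--   print (toks :: V.Vector ByteString)  -- ["hello","world"]
-- ```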
-- | Converts each string in the input Tensor to its hash mod by a number
-- of buckets.
--
-- The hash function is deterministic on the content of the string within
-- the process.
--
-- Note that the hash function may change from time to time. This
-- functionality will be deprecated and it's recommended to use
-- `tf.string_to_hash_bucket_fast()` or
-- `tf.string_to_hash_bucket_strong()`.
stringToHashBucket :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
stringToHashBucket' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
-- | Converts each string in the input Tensor to its hash mod by a number
-- of buckets.
--
-- The hash function is deterministic on the content of the string within
-- the process and will never change. However, it is not suitable for
-- cryptography. This function may be used when CPU time is scarce and
-- inputs are trusted or unimportant. There is a risk of adversaries
-- constructing inputs that all hash to the same bucket. To prevent this
-- problem, use a strong hash function with
-- `tf.string_to_hash_bucket_strong`.
stringToHashBucketFast :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
stringToHashBucketFast' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
-- | Converts each string in the input Tensor to its hash mod by a number
-- of buckets.
--
-- The hash function is deterministic on the content of the string within
-- the process. The hash function is a keyed hash function, where
-- attribute <tt>key</tt> defines the key of the hash function.
-- <tt>key</tt> is an array of 2 elements.
--
-- A strong hash is important when inputs may be malicious, e.g. URLs
-- with additional components. Adversaries could try to make their inputs
-- hash to the same bucket for a denial-of-service attack or to skew the
-- results. A strong hash prevents this by making it difficult, if not
-- infeasible, to compute inputs that hash to the same bucket. This comes
-- at a cost of roughly 4x higher compute time than
-- `tf.string_to_hash_bucket_fast`.
stringToHashBucketStrong :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
stringToHashBucketStrong' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
-- | Converts each string in the input Tensor to the specified numeric
-- type.
--
-- (Note that int32 overflow results in an error while float overflow
-- results in a rounded value.)
stringToNumber :: (OneOf '[Int32, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type
stringToNumber' :: (OneOf '[Int32, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type
-- | Returns x - y element-wise.
--
-- <ul>
-- <li>NOTE*: <tt>Sub</tt> supports broadcasting. More about broadcasting
-- <a>here</a></li>
-- </ul>
sub :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
sub' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Return substrings from <a>Tensor</a> of strings.
--
-- For each string in the input <a>Tensor</a>, creates a substring
-- starting at index <tt>pos</tt> with a total length of <tt>len</tt>.
--
-- If <tt>len</tt> defines a substring that would extend beyond the
-- length of the input string, then as many characters as possible are
-- used.
--
-- If <tt>pos</tt> is negative or specifies a character index larger than
-- any of the input strings, then an <tt>InvalidArgumentError</tt> is
-- thrown.
--
-- <tt>pos</tt> and <tt>len</tt> must have the same shape, otherwise a
-- <tt>ValueError</tt> is thrown on Op creation.
--
-- <ul>
-- <li>NOTE*: <tt>Substr</tt> supports broadcasting up to two dimensions.
-- More about broadcasting <a>here</a></li>
-- </ul>
--
-- Examples
--
-- Using scalar <tt>pos</tt> and <tt>len</tt>:
--
-- ``` input = [b<tt>Hello</tt>, b<tt>World</tt>] position = 1 length = 3
--
-- output = [b<tt>ell</tt>, b<tt>orl</tt>] ```
--
-- Using <tt>pos</tt> and <tt>len</tt> with same shape as <tt>input</tt>:
--
-- ``` input = [[b<tt>ten</tt>, b<tt>eleven</tt>, b<tt>twelve</tt>],
-- [b<tt>thirteen</tt>, b<tt>fourteen</tt>, b<tt>fifteen</tt>],
-- [b<tt>sixteen</tt>, b<tt>seventeen</tt>, b<tt>eighteen</tt>]] position
-- = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] length = [[2, 3, 4], [4, 3, 2],
-- [5, 5, 5]]
--
-- output = [[b<tt>en</tt>, b<tt>eve</tt>, b<tt>lve</tt>],
-- [b<tt>hirt</tt>, b<tt>urt</tt>, b<tt>te</tt>], [b<tt>ixtee</tt>,
-- b<tt>vente</tt>, b<tt>hteen</tt>]] ```
--
-- Broadcasting <tt>pos</tt> and <tt>len</tt> onto <tt>input</tt>:
--
-- ``` input = [[b<tt>ten</tt>, b<tt>eleven</tt>, b<tt>twelve</tt>],
-- [b<tt>thirteen</tt>, b<tt>fourteen</tt>, b<tt>fifteen</tt>],
-- [b<tt>sixteen</tt>, b<tt>seventeen</tt>, b<tt>eighteen</tt>],
-- [b<tt>nineteen</tt>, b<tt>twenty</tt>, b<tt>twentyone</tt>]] position
-- = [1, 2, 3] length = [1, 2, 3]
--
-- output = [[b<tt>e</tt>, b<tt>ev</tt>, b<tt>lve</tt>], [b<tt>h</tt>,
-- b<tt>ur</tt>, b<tt>tee</tt>], [b<tt>i</tt>, b<tt>ve</tt>,
-- b<tt>hte</tt>], [b<tt>i</tt>, b<tt>en</tt>, b<tt>nty</tt>]] ```
--
-- Broadcasting <tt>input</tt> onto <tt>pos</tt> and <tt>len</tt>:
--
-- ``` input = b<tt>thirteen</tt> position = [1, 5, 7] length = [3, 2, 1]
--
-- output = [b<tt>hir</tt>, b<tt>ee</tt>, b<tt>n</tt>] ```
substr :: (OneOf '[Int32, Int64] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString
substr' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString
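-- The scalar <tt>pos</tt>/<tt>len</tt> example above, as a Haskell
-- sketch (companion packages assumed):
--
-- ```
-- {-# LANGUAGE OverloadedStrings #-}
-- import Data.ByteString (ByteString)
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   out <- TF.runSession $ do
--     let input = TFO.constant (TF.Shape [2]) ["Hello", "World" :: ByteString]
--         pos   = TFO.constant (TF.Shape []) [1 :: Int32]
--         len   = TFO.constant (TF.Shape []) [3 :: Int32]
--     TF.run (TFC.substr input pos len)
--   print (out :: V.Vector ByteString)  -- ["ell","orl"]
-- ```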
-- | Computes the sum of elements across dimensions of a tensor.
--
-- Reduces <tt>input</tt> along the dimensions given in
-- <tt>reduction_indices</tt>. Unless <tt>keep_dims</tt> is true, the
-- rank of the tensor is reduced by 1 for each entry in
-- <tt>reduction_indices</tt>. If <tt>keep_dims</tt> is true, the reduced
-- dimensions are retained with length 1.
sum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
sum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
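-- For example, reducing a `[2, 2]` matrix over dimension 0 (a sketch,
-- assuming the companion <tt>tensorflow</tt> and
-- <tt>tensorflow-ops</tt> packages):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   colSums <- TF.runSession $ do
--     let x    = TFO.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--         axes = TFO.constant (TF.Shape [1]) [0 :: Int32]
--     TF.run (TFC.sum x axes)
--   print (colSums :: V.Vector Float)  -- [4.0, 6.0]
-- ```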
-- | Computes the singular value decompositions of one or more matrices.
--
-- Computes the SVD of each inner matrix in <tt>input</tt> such that
-- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
-- transpose(v[..., :, :])`
--
-- ```prettyprint # a is a tensor containing a batch of matrices. # s is
-- a tensor of singular values for each matrix. # u is the tensor
-- containing the left singular vectors for each matrix. # v is the
-- tensor containing the right singular vectors for each matrix. s, u, v = svd(a)
-- s, _, _ = svd(a, compute_uv=False) ```
svd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
svd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
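-- A sketch that fetches only the singular values; the result tuple is
-- assumed to be ordered `(s, u, v)` as in the pseudocode above
-- (companion packages assumed):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   s <- TF.runSession $ do
--     let a = TFO.constant (TF.Shape [2, 2]) [1, 0, 0, 1 :: Float]
--         (sv, _u, _v) = TFC.svd a
--     TF.run sv
--   print (s :: V.Vector Float)  -- the identity's singular values: [1.0, 1.0]
-- ```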
-- | Forwards `data` to the output port determined by <a>pred</a>.
--
-- If <a>pred</a> is true, the `data` input is forwarded to
-- <tt>output_true</tt>. Otherwise, the data goes to
-- <tt>output_false</tt>.
--
-- See also <tt>RefSwitch</tt> and <tt>Merge</tt>.
switch :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t)
switch' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t)
-- | A Reader that outputs the records from a TensorFlow Records file.
tFRecordReader :: (MonadBuild m') => m' (Tensor Ref ByteString)
tFRecordReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString)
-- | A Reader that outputs the records from a TensorFlow Records file.
tFRecordReaderV2 :: (MonadBuild m') => m' (ResourceHandle)
tFRecordReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle)
-- | Read <tt>SparseTensors</tt> from a <tt>SparseTensorsMap</tt> and
-- concatenate them.
--
-- The input <tt>sparse_handles</tt> must be an <tt>int64</tt> matrix of
-- shape `[N, 1]` where <tt>N</tt> is the minibatch size and the rows
-- correspond to the output handles of <tt>AddSparseToTensorsMap</tt> or
-- <tt>AddManySparseToTensorsMap</tt>. The ranks of the original
-- <tt>SparseTensor</tt> objects that went into the given input ops must
-- all match. When the final <tt>SparseTensor</tt> is created, it has
-- rank one higher than the ranks of the incoming <tt>SparseTensor</tt>
-- objects (they have been concatenated along a new row dimension on the
-- left).
--
-- The output <tt>SparseTensor</tt> object's shape values for all
-- dimensions but the first are the max across the input
-- <tt>SparseTensor</tt> objects' shape values for the corresponding
-- dimensions. Its first shape value is <tt>N</tt>, the minibatch size.
--
-- The input <tt>SparseTensor</tt> objects' indices are assumed ordered
-- in standard lexicographic order. If this is not the case, after this
-- step run <tt>SparseReorder</tt> to restore index ordering.
--
-- For example, if the handles represent an input, which is a `[2, 3]`
-- matrix representing two original <tt>SparseTensor</tt> objects:
--
-- ``` index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] ```
--
-- and
--
-- ``` index = [ 2] [10] values = [4, 5] shape = [30] ```
--
-- then the final <tt>SparseTensor</tt> will be:
--
-- ``` index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5]
-- shape = [2 50] ```
takeManySparseFromTensorsMap :: (MonadBuild m', TensorType dtype) => Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64))
takeManySparseFromTensorsMap' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64))
-- | Computes tan of x element-wise.
tan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
tan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes hyperbolic tangent of <tt>x</tt> element-wise.
tanh :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
tanh' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient for the tanh of <tt>x</tt> wrt its input.
--
-- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and
-- <tt>dy</tt> is the corresponding input gradient.
tanhGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
tanhGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns a tensor that may be mutated, but only persists within a
-- single step.
--
-- This is an experimental op for internal use only and it is possible to
-- use this op in unsafe ways. DO NOT USE unless you fully understand the
-- risks.
--
-- It is the caller's responsibility to ensure that <tt>ref</tt> is
-- eventually passed to a matching <tt>DestroyTemporaryVariable</tt> op
-- after all other uses have completed.
--
-- Outputs a ref to the tensor state so it may be read or modified.
--
-- E.g. var = state_ops._temporary_variable([1, 2], types.float_)
-- var_name = var.op.name var = state_ops.assign(var, [[4.0, 5.0]]) var =
-- state_ops.assign_add(var, [[6.0, 7.0]]) final =
-- state_ops._destroy_temporary_variable(var, var_name=var_name)
temporaryVariable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
temporaryVariable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)
tensorArray :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString)
tensorArray' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString)
tensorArrayClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode)
tensorArrayClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode)
-- | Deprecated. Use TensorArrayCloseV3
tensorArrayCloseV2 :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode)
tensorArrayCloseV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode)
-- | Delete the TensorArray from its resource container. This enables
--
-- the user to close and release the resource in the middle of a
-- step/run.
tensorArrayCloseV3 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode)
tensorArrayCloseV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode)
tensorArrayConcat :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64))
tensorArrayConcat' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64))
-- | Deprecated. Use TensorArrayConcatV3
tensorArrayConcatV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64)
tensorArrayConcatV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64)
-- | Concat the elements from the TensorArray into value <a>value</a>.
--
-- Takes <tt>T</tt> elements of shapes
--
-- ``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1
-- x ...) ```
--
-- and concatenates them into a Tensor of shape:
--
-- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
--
-- All elements must have the same shape (excepting the first dimension).
tensorArrayConcatV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64))
tensorArrayConcatV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64))
tensorArrayGather :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
tensorArrayGather' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
-- | Deprecated. Use TensorArrayGatherV3
tensorArrayGatherV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
tensorArrayGatherV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
-- | Gather specific elements from the TensorArray into output
-- <a>value</a>.
--
-- All elements selected by <tt>indices</tt> must have the same shape.
tensorArrayGatherV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
tensorArrayGatherV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
tensorArrayGrad :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString)
tensorArrayGrad' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString)
-- | Deprecated. Use TensorArrayGradV3
tensorArrayGradV2 :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString)
tensorArrayGradV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString)
-- | Creates a TensorArray for storing the gradients of values in the given
-- handle.
--
-- If the given TensorArray gradient already exists, returns a reference
-- to it.
--
-- Locks the size of the original TensorArray by disabling its dynamic
-- size flag.
--
-- <ul>
-- <li>*A note about the input flow_in:**</li>
-- </ul>
--
-- The handle flow_in forces the execution of the gradient lookup to
-- occur only after certain other operations have occurred. For example,
-- when the forward TensorArray is dynamically sized, writes to this
-- TensorArray may resize the object. The gradient TensorArray is
-- statically sized based on the size of the forward TensorArray when
-- this operation executes. Furthermore, the size of the forward
-- TensorArray is frozen by this call. As a result, the flow is used to
-- ensure that the call to generate the gradient TensorArray only happens
-- after all writes are executed.
--
-- In the case of dynamically sized TensorArrays, gradient computation
-- should only be performed on read operations that have themselves been
-- chained via flow to occur only after all writes have executed. That
-- way the final size of the forward TensorArray is known when this
-- operation is called.
--
-- <ul>
-- <li>*A note about the source attribute:**</li>
-- </ul>
--
-- TensorArray gradient calls use an accumulator TensorArray object. If
-- multiple gradients are calculated and run in the same session, the
-- multiple gradient nodes may accidentally flow through the same
-- accumulator TensorArray. This double counts and generally breaks the
-- TensorArray gradient flow.
--
-- The solution is to identify which gradient call this particular
-- TensorArray gradient is being called in. This is performed by
-- identifying a unique string (e.g. "gradients", "gradients_1", ...)
-- from the input gradient Tensor's name. This string is used as a suffix
-- when creating the TensorArray gradient object here (the attribute
-- <tt>source</tt>).
--
-- The attribute <tt>source</tt> is added as a suffix to the forward
-- TensorArray's name when performing the creation / lookup, so that each
-- separate gradient calculation gets its own TensorArray accumulator.
tensorArrayGradV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float))
tensorArrayGradV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float))
tensorArrayPack :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype)
tensorArrayPack' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype)
tensorArrayRead :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
tensorArrayRead' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
-- | Deprecated. Use TensorArrayReadV3
tensorArrayReadV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
tensorArrayReadV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
-- | Read an element from the TensorArray into output <a>value</a>.
tensorArrayReadV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
tensorArrayReadV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
tensorArrayScatter :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArrayScatter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
-- | Deprecated. Use TensorArrayScatterV3
tensorArrayScatterV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
tensorArrayScatterV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
-- | Scatter the data from the input value into specific TensorArray
-- elements.
--
-- <tt>indices</tt> must be a vector, its length must match the first dim
-- of <a>value</a>.
tensorArrayScatterV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArrayScatterV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArraySize :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32)
tensorArraySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32)
-- | Deprecated. Use TensorArraySizeV3
tensorArraySizeV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32
tensorArraySizeV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32
-- | Get the current size of the TensorArray.
tensorArraySizeV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32)
tensorArraySizeV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32)
tensorArraySplit :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArraySplit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
-- | Deprecated. Use TensorArraySplitV3
tensorArraySplitV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float
tensorArraySplitV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float
-- | Split the data from the input value into TensorArray elements.
--
-- Assuming that <tt>lengths</tt> takes on values
--
-- ```(n0, n1, ..., n(T-1))```
--
-- and that <a>value</a> has shape
--
-- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
--
-- this splits values into a TensorArray with T tensors.
--
-- TensorArray index t will be the subtensor of values with starting
-- position
--
-- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
--
-- and having size
--
-- ```nt x d0 x d1 x ...```
tensorArraySplitV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArraySplitV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArrayUnpack :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float)
tensorArrayUnpack' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float)
-- | Deprecated. Use TensorArrayV3
tensorArrayV2 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString)
tensorArrayV2' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString)
-- | An array of Tensors of given size, with data written via Write and
-- read
--
-- via Read or Pack.
tensorArrayV3 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float))
tensorArrayV3' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float))
tensorArrayWrite :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArrayWrite' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
-- | Deprecated. Use TensorArrayWriteV3
tensorArrayWriteV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
tensorArrayWriteV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
-- | Push an element onto the tensor_array.
tensorArrayWriteV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
tensorArrayWriteV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
-- | Outputs a <tt>Summary</tt> protocol buffer with a tensor.
tensorSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString
tensorSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString
-- | A Reader that outputs the lines of a file delimited by '\n'.
textLineReader :: (MonadBuild m') => m' (Tensor Ref ByteString)
textLineReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString)
-- | A Reader that outputs the lines of a file delimited by '\n'.
textLineReaderV2 :: (MonadBuild m') => m' (ResourceHandle)
textLineReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle)
-- | Generates labels for candidate sampling with a learned unigram
-- distribution.
--
-- See explanations of candidate sampling and the data formats at
-- go/candidate-sampling.
--
-- For each batch, this op picks a single set of sampled candidate
-- labels.
--
-- The advantages of sampling candidates per-batch are simplicity and the
-- possibility of efficient dense matrix multiplication. The disadvantage
-- is that the sampled candidates must be chosen independently of the
-- context and of the true labels.
threadUnsafeUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
threadUnsafeUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
-- | Constructs a tensor by tiling a given tensor.
--
-- This operation creates a new tensor by replicating <tt>input</tt>
-- <tt>multiples</tt> times. The output tensor's i'th dimension has
-- `input.dims(i) * multiples[i]` elements, and the values of
-- <tt>input</tt> are replicated `multiples[i]` times along the
-- <tt>i</tt>th dimension. For example, tiling `[a b c d]` by `[2]`
-- produces `[a b c d a b c d]`.
tile :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t
tile' :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => OpParams -> Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t
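-- A sketch in the spirit of the `[a b c d]` example above (companion
-- packages assumed):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   tiled <- TF.runSession $ do
--     let x         = TFO.constant (TF.Shape [2]) [1, 2 :: Float]
--         multiples = TFO.constant (TF.Shape [1]) [2 :: Int32]
--     TF.run (TFC.tile x multiples)
--   print (tiled :: V.Vector Float)  -- [1.0, 2.0, 1.0, 2.0]
-- ```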
-- | Returns the gradient of <tt>Tile</tt>.
--
-- Since <tt>Tile</tt> takes an input and repeats the input
-- <tt>multiples</tt> times along each dimension, <tt>TileGrad</tt> takes
-- in <tt>multiples</tt> and aggregates each repeated tile of
-- <tt>input</tt> into <tt>output</tt>.
tileGrad :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
tileGrad' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
-- | Finds values and indices of the <tt>k</tt> largest elements for the
-- last dimension.
--
-- If the input is a vector (rank-1), finds the <tt>k</tt> largest
-- entries in the vector and outputs their values and indices as vectors.
-- Thus `values[j]` is the <tt>j</tt>-th largest entry in <tt>input</tt>,
-- and its index is `indices[j]`.
--
-- For matrices (resp. higher rank input), computes the top <tt>k</tt>
-- entries in each row (resp. vector along the last dimension). Thus,
--
-- values.shape = indices.shape = input.shape[:-1] + [k]
--
-- If two elements are equal, the lower-index element appears first.
--
-- If <tt>k</tt> varies dynamically, use <tt>TopKV2</tt> below.
topK :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32)
topK' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32)
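-- A sketch with a fixed `k = 2` (companion packages assumed):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   (vals, idxs) <- TF.runSession $ do
--     let x = TFO.constant (TF.Shape [3]) [1, 3, 2 :: Float]
--     TF.run (TFC.topK 2 x)
--   print (vals :: V.Vector Float)  -- [3.0, 2.0]
--   print (idxs :: V.Vector Int32)  -- [1, 2]
-- ```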
-- | Finds values and indices of the <tt>k</tt> largest elements for the
-- last dimension.
--
-- If the input is a vector (rank-1), finds the <tt>k</tt> largest
-- entries in the vector and outputs their values and indices as vectors.
-- Thus `values[j]` is the <tt>j</tt>-th largest entry in <tt>input</tt>,
-- and its index is `indices[j]`.
--
-- For matrices (resp. higher rank input), computes the top <tt>k</tt>
-- entries in each row (resp. vector along the last dimension). Thus,
--
-- values.shape = indices.shape = input.shape[:-1] + [k]
--
-- If two elements are equal, the lower-index element appears first.
topKV2 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32)
topKV2' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32)
-- | Shuffle dimensions of x according to a permutation.
--
-- The output <tt>y</tt> has the same rank as <tt>x</tt>. The shapes of
-- <tt>x</tt> and <tt>y</tt> satisfy: `y.shape[i] == x.shape[perm[i]] for
-- i in [0, 1, ..., rank(x) - 1]`
transpose :: (TensorType t, OneOf '[Int32, Int64] tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
transpose' :: (TensorType t, OneOf '[Int32, Int64] tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
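-- A sketch transposing a `[2, 2]` matrix with `perm = [1, 0]`
-- (companion packages assumed; the result is fetched as a flat
-- row-major vector):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   yt <- TF.runSession $ do
--     let x    = TFO.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
--         perm = TFO.constant (TF.Shape [2]) [1, 0 :: Int32]
--     TF.run (TFC.transpose x perm)
--   print (yt :: V.Vector Float)  -- [1.0, 3.0, 2.0, 4.0]
-- ```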
-- | Returns x / y element-wise for integer types.
--
-- Truncation designates that negative numbers will round fractional
-- quantities toward zero. I.e. -7 / 5 = -1. This matches C semantics but
-- differs from Python semantics. See <tt>FloorDiv</tt> for a
-- division function that matches Python Semantics.
--
-- <ul>
-- <li>NOTE*: <tt>TruncateDiv</tt> supports broadcasting. More about
-- broadcasting <a>here</a></li>
-- </ul>
truncateDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
truncateDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns element-wise remainder of division.
--
-- This emulates C semantics in that the result here is consistent with
-- a truncating divide. E.g. `truncate(x / y) * y + truncate_mod(x, y) =
-- x`.
--
-- <ul>
-- <li>NOTE*: <tt>Mod</tt> supports broadcasting. More about broadcasting
-- <a>here</a></li>
-- </ul>
truncateMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
truncateMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
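-- A sketch checking the truncating identity `truncate(x / y) * y +
-- truncate_mod(x, y) = x` on negative operands, using
-- <tt>truncateDiv</tt> and <tt>truncateMod</tt> together (companion
-- packages assumed):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   (q, r) <- TF.runSession $ do
--     let x = TFO.constant (TF.Shape [1]) [-7 :: Int32]
--         y = TFO.constant (TF.Shape [1]) [5 :: Int32]
--     TF.run (TFC.truncateDiv x y, TFC.truncateMod x y)
--   print (q :: V.Vector Int32)  -- [-1]
--   print (r :: V.Vector Int32)  -- [-2], and (-1) * 5 + (-2) == -7
-- ```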
-- | Outputs random values from a truncated normal distribution.
--
-- The generated values follow a normal distribution with mean 0 and
-- standard deviation 1, except that values whose magnitude is more than
-- 2 standard deviations from the mean are dropped and re-picked.
truncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype)
truncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
-- | Generates labels for candidate sampling with a uniform distribution.
--
-- See explanations of candidate sampling and the data formats at
-- go/candidate-sampling.
--
-- For each batch, this op picks a single set of sampled candidate
-- labels.
--
-- The advantages of sampling candidates per-batch are simplicity and the
-- possibility of efficient dense matrix multiplication. The disadvantage
-- is that the sampled candidates must be chosen independently of the
-- context and of the true labels.
uniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
uniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
-- | Finds unique elements in a 1-D tensor.
--
-- This operation returns a tensor <tt>y</tt> containing all of the
-- unique elements of <tt>x</tt> sorted in the same order that they occur
-- in <tt>x</tt>. This operation also returns a tensor <tt>idx</tt> the
-- same size as <tt>x</tt> that contains the index of each value of
-- <tt>x</tt> in the unique output <tt>y</tt>. In other words:
--
-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
--
-- For example:
--
-- ```prettyprint # tensor <tt>x</tt> is [1, 1, 2, 4, 4, 4, 7, 8, 8] y,
-- idx = unique(x) y ==&gt; [1, 2, 4, 7, 8] idx ==&gt; [0, 0, 1, 2, 2, 2,
-- 3, 4, 4] ```
unique :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx)
unique' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx)
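-- The example above, as a sketch; the `out_idx` type is pinned to
-- <tt>Int32</tt> by the fetch annotations (companion packages assumed):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   (ys, idxs) <- TF.runSession $ do
--     let x = TFO.constant (TF.Shape [4]) [1, 1, 2, 1 :: Float]
--     TF.run (TFC.unique x)
--   print (ys :: V.Vector Float)    -- [1.0, 2.0]
--   print (idxs :: V.Vector Int32)  -- [0, 0, 1, 0]
-- ```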
-- | Finds unique elements in a 1-D tensor.
--
-- This operation returns a tensor <tt>y</tt> containing all of the
-- unique elements of <tt>x</tt> sorted in the same order that they occur
-- in <tt>x</tt>. This operation also returns a tensor <tt>idx</tt> the
-- same size as <tt>x</tt> that contains the index of each value of
-- <tt>x</tt> in the unique output <tt>y</tt>. Finally, it returns a
-- third tensor <tt>count</tt> that contains the count of each element of
-- <tt>y</tt> in <tt>x</tt>. In other words:
--
-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
--
-- For example:
--
-- ```prettyprint # tensor <tt>x</tt> is [1, 1, 2, 4, 4, 4, 7, 8, 8] y,
-- idx, count = unique_with_counts(x) y ==&gt; [1, 2, 4, 7, 8] idx ==&gt;
-- [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==&gt; [2, 1, 3, 1, 2] ```
uniqueWithCounts :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)
uniqueWithCounts' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)
-- | Unpacks a given dimension of a rank-<tt>R</tt> tensor into
-- <tt>num</tt> rank-`(R-1)` tensors.
--
-- Unpacks <tt>num</tt> tensors from <a>value</a> by chipping it along
-- the <tt>axis</tt> dimension. For example, given a tensor of shape `(A,
-- B, C, D)`;
--
-- If `axis == 0` then the i'th tensor in <tt>output</tt> is the slice
-- `value[i, :, :, :]` and each tensor in <tt>output</tt> will have shape
-- `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike
-- <a>split</a>).
--
-- If `axis == 1` then the i'th tensor in <tt>output</tt> is the slice
-- `value[:, i, :, :]` and each tensor in <tt>output</tt> will have shape
-- `(A, C, D)`. Etc.
--
-- This is the opposite of <a>pack</a>.
unpack :: (TensorType t) => Int64 -> Tensor v'1 t -> [Tensor Build t]
unpack' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> [Tensor Build t]
-- | Computes the sum along segments of a tensor.
--
-- Read <a>the section on Segmentation</a> for an explanation of
-- segments.
--
-- Computes a tensor such that `output[i] = sum_{j...} data[j...]` where
-- the sum is over tuples `j...` such that `segment_ids[j...] == i`.
-- Unlike <tt>SegmentSum</tt>, <tt>segment_ids</tt> need not be sorted
-- and need not cover all values in the full range of valid values.
--
-- If the sum is empty for a given segment ID <tt>i</tt>, `output[i] =
-- 0`.
--
-- <tt>num_segments</tt> should equal the number of distinct segment IDs.
--
-- (Figure: ../../images/UnsortedSegmentSum.png)
unsortedSegmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t
unsortedSegmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t
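-- A sketch with two unsorted segments; <tt>num_segments</tt> is passed
-- as a scalar tensor (companion packages assumed):
--
-- ```
-- import Data.Int (Int32)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = do
--   sums <- TF.runSession $ do
--     let xs   = TFO.constant (TF.Shape [4]) [1, 2, 3, 4 :: Float]
--         ids  = TFO.constant (TF.Shape [4]) [0, 1, 0, 1 :: Int32]
--         nseg = TFO.constant (TF.Shape []) [2 :: Int32]
--     TF.run (TFC.unsortedSegmentSum xs ids nseg)
--   print (sums :: V.Vector Float)  -- [4.0, 6.0]
-- ```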
-- | Op is similar to a lightweight Dequeue. The basic functionality is
-- similar to
--
-- dequeue with many fewer capabilities and options. This Op is optimized
-- for performance.
unstage :: (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes)
unstage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes)
-- | Creates a handle to a Variable resource.
varHandleOp :: (MonadBuild m') => DataType -> Shape -> m' (ResourceHandle)
varHandleOp' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (ResourceHandle)
-- | Checks whether a resource handle-based variable has been initialized.
varIsInitializedOp :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Bool)
varIsInitializedOp' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Bool)
-- | Use VariableV2 instead.
variable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
variable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)
-- | Holds state in the form of a tensor that persists across steps.
--
-- Outputs a ref to the tensor state so it may be read or modified.
-- TODO(zhifengc/mrry): Add a pointer to a more detailed document about
-- sharing states in tensorflow.
variableV2 :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
variableV2' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)
-- | Returns locations of true values in a boolean tensor.
--
-- This operation returns the coordinates of true elements in
-- <tt>input</tt>. The coordinates are returned in a 2-D tensor where the
-- first dimension (rows) represents the number of true elements, and the
-- second dimension (columns) represents the coordinates of the true
-- elements. Keep in mind, the shape of the output tensor can vary
-- depending on how many true values there are in <tt>input</tt>. Indices
-- are output in row-major order.
--
-- For example:
--
-- ```prettyprint
-- # <tt>input</tt> tensor is [[True, False]
-- #                          [True, False]]
-- # <tt>input</tt> has two true values, so output has two coordinates.
-- # <tt>input</tt> has rank of 2, so coordinates have two indices.
-- where(input) ==> [[0, 0], [1, 0]]
--
-- # <tt>input</tt> tensor is [[[True, False]
-- #                           [True, False]]
-- #                          [[False, True]
-- #                           [False, True]]
-- #                          [[False, False]
-- #                           [False, True]]]
-- # <tt>input</tt> has 5 true values, so output has 5 coordinates.
-- # <tt>input</tt> has rank of 3, so coordinates have three indices.
-- where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]]
-- ```
where' :: Tensor v'1 Bool -> Tensor Build Int64
where'' :: OpParams -> Tensor v'1 Bool -> Tensor Build Int64
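-- A minimal Haskell sketch of <tt>where'</tt> on the first example above,
-- assuming the companion <tt>tensorflow</tt> and <tt>tensorflow-ops</tt>
-- packages; note that the 2x2 Int64 coordinate matrix is fetched as a
-- flat row-major vector:
--
-- ```
-- import Control.Monad.IO.Class (liftIO)
-- import Data.Int (Int64)
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = TF.runSession $ do
--     let input = TFO.constant (TF.Shape [2, 2]) [True, False, True, False]
--     coords <- TF.run (TFC.where' input)
--     liftIO $ print (coords :: V.Vector Int64)  -- [0,0,1,0], i.e. [[0,0],[1,0]]
-- ```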
-- | A Reader that outputs the entire contents of a file as a value.
--
-- To use, enqueue filenames in a Queue. The output of ReaderRead will be
-- a filename (key) and the contents of that file (value).
wholeFileReader :: (MonadBuild m') => m' (Tensor Ref ByteString)
wholeFileReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString)
-- | A Reader that outputs the entire contents of a file as a value.
--
-- To use, enqueue filenames in a Queue. The output of ReaderRead will be
-- a filename (key) and the contents of that file (value).
wholeFileReaderV2 :: (MonadBuild m') => m' (ResourceHandle)
wholeFileReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle)
-- | Writes contents to the file at the input filename. Creates the file
-- if it does not already exist.
writeFile :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
writeFile' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
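-- A minimal sketch, assuming the companion <tt>tensorflow</tt> and
-- <tt>tensorflow-ops</tt> packages; the output path <tt>hello.txt</tt>
-- is a hypothetical example, and both arguments are scalar string
-- tensors:
--
-- ```
-- {-# LANGUAGE OverloadedStrings #-}
--
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = TF.runSession $ do
--     w <- TFC.writeFile (TFO.scalar "hello.txt") (TFO.scalar "hello, tensorflow\n")
--     TF.run_ w  -- running the ControlNode performs the write
-- ```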
-- | Returns a tensor of zeros with the same shape and type as x.
zerosLike :: (TensorType t) => Tensor v'1 t -> Tensor Build t
zerosLike' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Compute the Hurwitz zeta function \(\zeta(x, q)\).
--
-- The Hurwitz zeta function is defined as:
--
-- ``` \zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x} ```
zeta :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
zeta' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
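-- As a sanity check one can evaluate the series at a known point: with
-- q = 1 the Hurwitz zeta function reduces to the Riemann zeta function,
-- so zeta(2, 1) = pi^2/6 ~= 1.6449. A minimal sketch, assuming the
-- companion <tt>tensorflow</tt> and <tt>tensorflow-ops</tt> packages:
--
-- ```
-- import Control.Monad.IO.Class (liftIO)
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as TFC
-- import qualified TensorFlow.Ops as TFO
--
-- main :: IO ()
-- main = TF.runSession $ do
--     z <- TF.run (TFC.zeta (TFO.scalar (2 :: Float)) (TFO.scalar 1))
--     liftIO $ print (z :: TF.Scalar Float)  -- approximately 1.6449
-- ```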
-- | A graph node which represents an argument to a function.
_Arg :: (MonadBuild m', TensorType t) => Int64 -> m' (Tensor Value t)
_Arg' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> m' (Tensor Value t)
-- | Converts an array of tensors to a list of tensors.
_ArrayToList :: (TensorType t, TensorTypes out_types) => [Tensor v'1 t] -> TensorList (Build) out_types
_ArrayToList' :: (TensorType t, TensorTypes out_types) => OpParams -> [Tensor v'1 t] -> TensorList (Build) out_types
-- | Cast x of type SrcT to y of DstT.
--
-- _HostCast requires its input and produces its output in host memory.
_HostCast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT
_HostCast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT
-- | Receives the named tensor from send_device on recv_device.
--
-- _HostRecv requires its input on host memory whereas _Recv requires its
-- input on device memory.
_HostRecv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type)
_HostRecv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type)
-- | Sends the named tensor from send_device to recv_device.
--
-- _HostSend requires its input on host memory whereas _Send requires its
-- input on device memory.
_HostSend :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode)
_HostSend' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode)
-- | Converts a list of tensors to an array of tensors.
_ListToArray :: (TensorTypes tin, TensorType t) => Int64 -> TensorList (v'1) tin -> [Tensor Build t]
_ListToArray' :: (TensorTypes tin, TensorType t) => OpParams -> Int64 -> TensorList (v'1) tin -> [Tensor Build t]
-- | Creates an empty Tensor with shape <a>shape</a> and type
-- <tt>dtype</tt>.
--
-- The memory can optionally be initialized. This is usually useful in
-- conjunction with inplace operations.
_ParallelConcatStart :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Value dtype)
_ParallelConcatStart' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Value dtype)
-- | Updates input <a>value</a> at <tt>loc</tt> with <tt>update</tt>.
--
-- If you use this function you will almost certainly want to add a
-- control dependency as done in the implementation of parallel_stack to
-- avoid race conditions.
_ParallelConcatUpdate :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
_ParallelConcatUpdate' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Receives the named tensor from send_device on recv_device.
_Recv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type)
_Recv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type)
-- | A graph node which represents a return value of a function.
_Retval :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode)
_Retval' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode)
-- | Sends the named tensor from send_device to recv_device.
_Send :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode)
_Send' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode)