-- Hoogle documentation, generated by Haddock -- See Hoogle, http://www.haskell.org/hoogle/ -- | Haskell wrappers for Core Tensorflow Ops. -- -- Code-generated signatures for the Ops in libtensorflow. @package tensorflow-core-ops @version 0.1.0.0 module TensorFlow.GenOps.Core -- | Raise an exception to abort the process when called. If -- exit_without_error is true, the process will exit normally; otherwise -- it will exit with a SIGABRT signal. -- -- Returns nothing but an exception. abort :: (MonadBuild m') => m' (ControlNode) abort' :: (MonadBuild m') => OpParams -> m' (ControlNode) -- | Computes the absolute value of a tensor. -- -- Given a tensor x, this operation returns a tensor containing -- the absolute value of each element in x. For example, if x is -- an input element and y is an output element, this operation computes -- \(y = |x|\). abs :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t abs' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Applies a gradient to a given accumulator. Does not add if local_step -- is less -- -- than the accumulator's global_step. accumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode) accumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode) -- | Returns the number of gradients aggregated in the given accumulators. accumulatorNumAccumulated :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) accumulatorNumAccumulated' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) -- | Updates the accumulator with a new value for global_step. Logs a warning -- if the -- -- accumulator's value is already higher than new_global_step. accumulatorSetGlobalStep :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode) accumulatorSetGlobalStep' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode) -- | Extracts the average gradient in the given ConditionalAccumulator, -- provided -- -- that sufficient (i.e., more than num_required) gradients have been -- accumulated. The op blocks until sufficient gradients have been -- accumulated. If the accumulator has already aggregated more than -- num_required gradients, it returns the average of the accumulated -- gradients. Also automatically increments the recorded global_step in -- the accumulator by 1, and resets the aggregate to 0. accumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype) accumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype) -- | Computes acos of x element-wise.
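--
-- A quick usage sketch for these generated ops (this assumes the
-- companion tensorflow package's TensorFlow.Core and TensorFlow.Ops
-- modules for session running and constants; the TFC alias is
-- illustrative, not part of this package):
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (vector)
-- > import qualified TensorFlow.GenOps.Core as TFC
-- >
-- > main :: IO ()
-- > main = do
-- >   -- acos is applied element-wise to the input vector.
-- >   r <- TF.runSession $ TF.run (TFC.acos (TF.vector [1, 0.5 :: Float]))
-- >   print (r :: V.Vector Float)  -- approximately [0.0, 1.0472]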
acos :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t acos' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns x + y element-wise. -- -- *NOTE*: add supports broadcasting; addN does not. add :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t add' :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Add an N-minibatch SparseTensor to a -- SparseTensorsMap, return N handles. -- -- A SparseTensor of rank R is represented by three -- tensors: sparse_indices, sparse_values, and -- sparse_shape, where -- -- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R``` -- -- An N-minibatch of SparseTensor objects is -- represented as a SparseTensor having a first -- sparse_indices column taking values between `[0, N)`, where -- the minibatch size `N == sparse_shape[0]`. -- -- The input SparseTensor must have rank R greater than -- 1, and the first dimension is treated as the minibatch dimension. -- Elements of the SparseTensor must be sorted in increasing -- order of this first dimension. The stored SparseTensor -- objects pointed to by each row of the output sparse_handles -- will have rank `R-1`. -- -- The SparseTensor values can then be read out as part of a -- minibatch by passing the given keys as vector elements to -- TakeManySparseFromTensorsMap. To ensure the correct -- SparseTensorsMap is accessed, ensure that the same -- container and shared_name are passed to that Op. If -- no shared_name is provided here, instead use the *name* of -- the Operation created by calling AddManySparseToTensorsMap as -- the shared_name passed to -- TakeManySparseFromTensorsMap. Ensure the Operations are -- colocated. addManySparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) addManySparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) -- | Add all input tensors element-wise. addN :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => [Tensor v'1 t] -> Tensor Build t addN' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> [Tensor v'1 t] -> Tensor Build t -- | Add a SparseTensor to a SparseTensorsMap return its -- handle. -- -- A SparseTensor is represented by three tensors: -- sparse_indices, sparse_values, and -- sparse_shape. -- -- This operator takes the given SparseTensor and adds it to a -- container object (a SparseTensorsMap). A unique key within -- this container is generated in the form of an int64, and this -- is the value that is returned. -- -- The SparseTensor can then be read out as part of a minibatch -- by passing the key as a vector element to -- TakeManySparseFromTensorsMap. To ensure the correct -- SparseTensorsMap is accessed, ensure that the same -- container and shared_name are passed to that Op. If -- no shared_name is provided here, instead use the *name* of -- the Operation created by calling AddSparseToTensorsMap as the -- shared_name passed to TakeManySparseFromTensorsMap. -- Ensure the Operations are colocated.
addSparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) addSparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) -- | Deprecated. Disallowed in GraphDef version >= 2. adjustContrast :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float adjustContrast' :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float -- | Adjust the contrast of one or more images. -- -- images is a tensor of at least 3 dimensions. The last 3 -- dimensions are interpreted as `[height, width, channels]`. The other -- dimensions only represent a collection of images, such as `[batch, -- height, width, channels]`. -- -- Contrast is adjusted independently for each channel of each image. -- -- For each channel, the Op first computes the mean of the image pixels -- in the channel and then adjusts each component of each pixel to `(x - -- mean) * contrast_factor + mean`. adjustContrastv2 :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float adjustContrastv2' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -- | Adjust the hue of one or more images. -- -- images is a tensor of at least 3 dimensions. The last -- dimension is interpreted as channels, and must be three. -- -- The input image is considered in the RGB colorspace. Conceptually, the -- RGB colors are first mapped into HSV. A delta is then applied to all the -- hue values, and then remapped back to RGB colorspace. adjustHue :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float adjustHue' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -- | Adjust the saturation of one or more images. -- -- images is a tensor of at least 3 dimensions. The last -- dimension is interpreted as channels, and must be three. -- -- The input image is considered in the RGB colorspace. Conceptually, the -- RGB colors are first mapped into HSV. A scale is then applied to all the -- saturation values, and then remapped back to RGB colorspace. adjustSaturation :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float adjustSaturation' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -- | Computes the "logical and" of elements across dimensions of a tensor. -- -- Reduces input along the dimensions given in -- reduction_indices. Unless keep_dims is true, the -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. all :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool all' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool -- | Generates labels for candidate sampling with a learned unigram -- distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate -- labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage -- is that the sampled candidates must be chosen independently of the -- context and of the true labels.
allCandidateSampler :: Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) allCandidateSampler' :: OpParams -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -- | Computes the "logical or" of elements across dimensions of a tensor. -- -- Reduces input along the dimensions given in -- reduction_indices. Unless keep_dims is true, the -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. any :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool any' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool -- | Update '*var' according to the adadelta scheme. -- -- accum = rho() * accum + (1 - rho()) * grad.square(); update = -- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; -- update_accum = rho() * update_accum + (1 - rho()) * update.square(); -- var -= update; applyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t) applyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t) -- | Update '*var' according to the adagrad scheme. -- -- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) applyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t) applyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t) -- | Update '*var' according to the proximal adagrad scheme. applyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t) applyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t) -- | Update '*var' according to the Adam algorithm. 
-- -- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- -- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - -- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + -- epsilon) applyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t) applyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t) -- | Update '*var' according to the centered RMSProp algorithm. -- -- The centered RMSProp algorithm uses an estimate of the centered second -- moment (i.e., the variance) for normalization, as opposed to regular -- RMSProp, which uses the (uncentered) second moment. This often helps -- with training, but is slightly more expensive in terms of computation -- and memory. -- -- Note that in the dense implementation of this algorithm, mg, ms, and mom -- will update even if the grad is zero, but in this sparse -- implementation, mg, ms, and mom will not update in iterations during -- which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- mean_grad = decay * mean_grad + (1-decay) * gradient -- -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - -- mean_grad ** 2) -- -- mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + -- (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / -- sqrt(ms - mg * mg + epsilon) var <- var - mom applyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) applyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) -- | Update '*var' according to the Ftrl-proximal scheme. -- -- accum_new = accum + grad * grad linear += grad + -- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 -- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - -- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new applyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) applyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) -- | Update '*var' by subtracting alpha * delta from it.
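--
-- A minimal training-step sketch (assuming the companion tensorflow
-- package's TensorFlow.Core and TensorFlow.Ops for
-- initializedVariable, scalar, and vector, and that fetching the
-- returned ref forces the in-place update; the TFC alias is
-- illustrative):
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (initializedVariable, scalar, vector)
-- > import qualified TensorFlow.GenOps.Core as TFC
-- >
-- > main :: IO ()
-- > main = do
-- >   w <- TF.runSession $ do
-- >     var <- TF.initializedVariable (TF.vector [1, 2 :: Float])
-- >     -- var := var - alpha * delta, with alpha = 0.1 and delta = [10, 10]
-- >     TF.run =<< TFC.applyGradientDescent var (TF.scalar 0.1) (TF.vector [10, 10])
-- >   print (w :: V.Vector Float)  -- expected [0.0, 1.0]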
applyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t) applyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t) -- | Update '*var' according to the momentum scheme. Set use_nesterov = -- True if you -- -- want to use Nesterov momentum. -- -- accum = accum * momentum + grad var -= lr * accum applyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) applyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) -- | Update '*var' and '*accum' according to FOBOS with Adagrad learning -- rate. -- -- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var -- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} applyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t) applyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t) -- | Update '*var' as the FOBOS algorithm with a fixed learning rate. -- -- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * -- max{|prox_v|-alpha*l1,0} applyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) applyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) -- | Update '*var' according to the RMSProp algorithm. -- -- Note that in the dense implementation of this algorithm, ms and mom will -- update even if the grad is zero, but in this sparse implementation, ms -- and mom will not update in iterations during which the grad is zero.
-- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = -- learning_rate * gradient / sqrt(mean_square + epsilon) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * -- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom applyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) applyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) -- | Returns the index with the largest value across dimensions of a -- tensor. argMax :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 argMax' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 -- | Returns the index with the smallest value across dimensions of a -- tensor. argMin :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 argMin' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 -- | Converts each entry in the given tensor to strings. Supports many -- numeric -- -- types and boolean. asString :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => Tensor v'1 t -> Tensor Build ByteString asString' :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString -- | Computes asin of x element-wise. asin :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t asin' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Asserts that the given condition is true. -- -- If condition evaluates to false, print the list of tensors in -- `data`. summarize determines how many entries of the tensors -- to print. assert :: (MonadBuild m', TensorTypes t) => Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode) assert' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode) -- | Update ref by assigning value to it. -- -- This operation outputs "ref" after the assignment is done. This makes -- it easier to chain operations that need to use the reset value. assign :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) assign' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -- | Update ref by adding value to it. -- -- This operation outputs "ref" after the update is done. This makes it -- easier to chain operations that need to use the reset value. 
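--
-- For example (a sketch assuming TensorFlow.Core and TensorFlow.Ops
-- from the companion tensorflow package; the TFC alias is
-- illustrative):
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (initializedVariable, scalar)
-- > import qualified TensorFlow.GenOps.Core as TFC
-- >
-- > main :: IO ()
-- > main = do
-- >   r <- TF.runSession $ do
-- >     v <- TF.initializedVariable (TF.scalar (5 :: Float))
-- >     -- Fetching the returned ref runs the in-place update.
-- >     TF.run =<< TFC.assignAdd v (TF.scalar 3)
-- >   print (r :: V.Vector Float)  -- [8.0]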
assignAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) assignAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -- | Adds a value to the current value of a variable. -- -- Any ReadVariableOp which depends directly or indirectly on this assign -- is guaranteed to see the incremented value or a subsequent newer one. -- -- Outputs the incremented value, which can be used to totally order the -- increments to this variable. assignAddVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) assignAddVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) -- | Update ref by subtracting value from it. -- -- This operation outputs "ref" after the update is done. This makes it -- easier to chain operations that need to use the reset value. assignSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) assignSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -- | Assigns a new value to a variable. -- -- Any ReadVariableOp with a control dependency on this op is guaranteed -- to return this value or a subsequent newer value of the variable. assignVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) assignVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) -- | Computes atan of x element-wise. atan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t atan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Outputs a Summary protocol buffer with audio. -- -- The summary has up to max_outputs summary values containing -- audio. The audio is built from tensor which must be 3-D with -- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, -- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` -- with a sample rate of sample_rate. -- -- The tag argument is a scalar Tensor of type -- string. It is used to build the tag of the summary -- values: -- -- * If max_outputs is 1, the summary value tag is '*tag*/audio'. -- -- * If max_outputs is greater than 1, the summary value tags are generated -- sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. audioSummary :: Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString audioSummary' :: OpParams -> Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString -- | Outputs a Summary protocol buffer with audio. -- -- The summary has up to max_outputs summary values containing -- audio. The audio is built from tensor which must be 3-D with -- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, -- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` -- with a sample rate of sample_rate. -- -- The tag argument is a scalar Tensor of type -- string.
It is used to build the tag of the summary -- values: -- -- * If max_outputs is 1, the summary value tag is '*tag*/audio'. -- -- * If max_outputs is greater than 1, the summary value tags are generated -- sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. audioSummaryV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString audioSummaryV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString -- | Performs average pooling on the input. -- -- Each entry in output is the mean of the corresponding size -- ksize window in value. avgPool :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t avgPool' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Performs 3D average pooling on the input. avgPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t avgPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes gradients of the 3D average pooling function. avgPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t avgPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t -- | Computes gradients of the average pooling function. avgPoolGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t avgPoolGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t -- | Defines a barrier that persists across different graph executions. -- -- A barrier represents a key-value map, where each key is a string, and -- each value is a tuple of tensors. -- -- At runtime, the barrier contains complete and -- incomplete elements. A complete element has defined tensors -- for all components of its value tuple, and may be accessed using -- BarrierTakeMany. An incomplete element has some undefined components -- in its value tuple, and may be updated using BarrierInsertMany. barrier :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) barrier' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) -- | Closes the given barrier. -- -- This operation signals that no more new elements will be inserted in -- the given barrier. Subsequent InsertMany operations that try to introduce a new -- key will fail. Subsequent InsertMany operations that just add missing -- components to already existing elements will continue to succeed. -- Subsequent TakeMany operations will continue to succeed if sufficient -- completed elements remain in the barrier. Subsequent TakeMany -- operations that would block will fail immediately. barrierClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) barrierClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) -- | Computes the number of incomplete elements in the given barrier. barrierIncompleteSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) barrierIncompleteSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) -- | For each key, assigns the respective value to the specified component. -- -- If a key is not found in the barrier, this operation will create a new -- incomplete element.
If a key is found in the barrier, and the element -- already has a value at component_index, this operation will fail with -- INVALID_ARGUMENT, and leave the barrier in an undefined state. barrierInsertMany :: (MonadBuild m', TensorType t) => Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode) barrierInsertMany' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode) -- | Computes the number of complete elements in the given barrier. barrierReadySize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) barrierReadySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) -- | Takes the given number of completed elements from a barrier. -- -- This operation concatenates completed-element component tensors along -- the 0th dimension to make a single component tensor. -- -- Elements come out of the barrier when they are complete, and in the -- order in which they were placed into the barrier. The indices output -- provides information about the batch in which each element was -- originally inserted into the barrier. barrierTakeMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types)) barrierTakeMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types)) batchCholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t batchCholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t batchCholeskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchCholeskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchIFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchIFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchIFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchIFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchIFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) batchIFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -- | Multiplies slices of two tensors in batches. -- -- Multiplies all slices of Tensor x and y (each -- slice can be viewed as an element of a batch), and arranges the -- individual results in a single output tensor of the same batch size. -- Each of the individual slices can optionally be adjointed (to adjoint -- a matrix means to transpose and conjugate it) before multiplication by -- setting the adj_x or adj_y flag to True, -- which are by default False. -- -- The input tensors x and y are 3-D or higher with -- shape `[..., r_x, c_x]` and `[..., r_y, c_y]`. 
-- -- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, -- where: -- -- r_o = c_x if adj_x else r_x c_o = r_y if adj_y else c_y -- -- It is computed as: -- -- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) batchMatMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchMatMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchMatrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t batchMatrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t batchMatrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t batchMatrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t batchMatrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t batchMatrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t batchMatrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t batchMatrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t batchMatrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t batchMatrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t batchMatrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchMatrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchMatrixSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchMatrixSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchMatrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t batchMatrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t batchMatrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t batchMatrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Batch normalization. -- -- This op is deprecated. Prefer `tf.nn.batch_normalization`. batchNormWithGlobalNormalization :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t batchNormWithGlobalNormalization' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t -- | Gradients for batch normalization. -- -- This op is deprecated. See `tf.nn.batch_normalization`. 
batchNormWithGlobalNormalizationGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) batchNormWithGlobalNormalizationGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) batchSelfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t batchSelfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t batchSelfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) batchSelfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) batchSvd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) batchSvd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) -- | BatchToSpace for 4-D tensors of type T. -- -- This is a legacy version of the more general BatchToSpaceND. -- -- Rearranges (permutes) data from batch into blocks of spatial data, -- followed by cropping. This is the reverse transformation of -- SpaceToBatch. More specifically, this op outputs a copy of the input -- tensor where values from the batch dimension are moved in -- spatial blocks to the height and width dimensions, -- followed by cropping along the height and width -- dimensions. batchToSpace :: (TensorType t, OneOf '[Int32, Int64] tidx) => Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t batchToSpace' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | BatchToSpace for N-D tensors of type T. -- -- This operation reshapes the "batch" dimension 0 into `M + 1` -- dimensions of shape `block_shape + [batch]`, interleaves these blocks -- back into the grid defined by the spatial dimensions `[1, ..., M]`, to -- obtain a result with the same rank as the input. The spatial -- dimensions of this intermediate result are then optionally cropped -- according to crops to produce the output. This is the reverse -- of SpaceToBatch. See below for a precise description. batchToSpaceND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t batchToSpaceND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t -- | Compute the regularized incomplete beta integral \(I_x(a, b)\). -- -- The regularized incomplete beta integral is defined as: -- -- ``` I_x(a, b) = \frac{B(x; a, b)}{B(a, b)} ``` where -- -- ``` B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt ``` -- -- is the incomplete beta function and \(B(a, b)\) is the *complete* beta -- function.
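--
-- For example, \(I_x(2, 2) = 3x^2 - 2x^3\), which the following
-- sketch checks at a few points (assuming TensorFlow.Core and
-- TensorFlow.Ops from the companion tensorflow package; the TFC
-- alias is illustrative):
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (vector)
-- > import qualified TensorFlow.GenOps.Core as TFC
-- >
-- > main :: IO ()
-- > main = do
-- >   let a = TF.vector [2, 2, 2 :: Float]
-- >       b = TF.vector [2, 2, 2 :: Float]
-- >       x = TF.vector [0, 0.5, 1 :: Float]
-- >   r <- TF.runSession $ TF.run (TFC.betainc a b x)
-- >   print (r :: V.Vector Float)  -- [0.0, 0.5, 1.0]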
betainc :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t betainc' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Adds bias to value. -- -- This is a special case of `tf.add` where bias is restricted -- to be 1-D. Broadcasting is supported, so value may have any -- number of dimensions. biasAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t biasAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | The backward operation for BiasAdd on the "bias" tensor. -- -- It accumulates all the values from out_backprop into the feature -- dimension. For NHWC data format, the feature dimension is the last. -- For NCHW data format, the feature dimension is the third-to-last. biasAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t biasAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Adds bias to value. -- -- This is a deprecated version of BiasAdd and will be soon removed. -- -- This is a special case of `tf.add` where bias is restricted -- to be 1-D. Broadcasting is supported, so value may have any -- number of dimensions. biasAddV1 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t biasAddV1' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Bitcasts a tensor from one type to another without copying data. -- -- Given a tensor input, this operation returns a tensor that -- has the same buffer data as input with datatype `type`. -- -- If the input datatype T is larger than the output datatype -- `type` then the shape changes from [...] to [..., -- sizeof(T)/sizeof(`type`)]. -- -- If T is smaller than `type`, the operator requires that the -- rightmost dimension be equal to sizeof(`type`)/sizeof(T). The -- shape then goes from [..., sizeof(`type`)/sizeof(T)] to -- [...]. -- -- bitcast :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => Tensor v'1 t -> Tensor Build type' bitcast' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => OpParams -> Tensor v'1 t -> Tensor Build type' -- | Return the shape of s0 op s1 with broadcast. -- -- Given s0 and s1, tensors that represent shapes, -- compute r0, the broadcasted shape. s0, s1 -- and r0 are all integer vectors. broadcastArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t broadcastArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Return the reduction indices for computing gradients of s0 op s1 with -- broadcast. -- -- This is typically used by gradient computations for a broadcasting -- operation. 
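--
-- For example, broadcasting shapes [2, 3, 5] and [1, 3, 1] requires
-- summing gradients over no dimensions of the first operand and over
-- dimensions 0 and 2 of the second (a sketch assuming TensorFlow.Core
-- and TensorFlow.Ops from the companion tensorflow package; the TFC
-- alias is illustrative):
--
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (vector)
-- > import qualified TensorFlow.GenOps.Core as TFC
-- >
-- > main :: IO ()
-- > main = do
-- >   let s0 = TF.vector [2, 3, 5 :: Int32]
-- >       s1 = TF.vector [1, 3, 1 :: Int32]
-- >   (r0, r1) <- TF.runSession $ TF.run (TFC.broadcastGradientArgs s0 s1)
-- >   print (r0 :: V.Vector Int32, r1 :: V.Vector Int32)  -- ([], [0, 2])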
broadcastGradientArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) broadcastGradientArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) -- | Performs beam search decoding on the logits given in input. -- -- A note about the attribute merge_repeated: For the beam search -- decoder, this means that if consecutive entries in a beam are the -- same, only the first of these is emitted. That is, when the top path -- is "A B B B B", "A B" is returned if merge_repeated = True but "A B B -- B B" is returned if merge_repeated = False. cTCBeamSearchDecoder :: Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) cTCBeamSearchDecoder' :: OpParams -> Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) -- | Performs greedy decoding on the logits given in inputs. -- -- A note about the attribute merge_repeated: if enabled, when -- consecutive logits' maximum indices are the same, only the first of -- these is emitted. Labeling the blank *, the sequence "A B B * B -- B" becomes "A B" if merge_repeated = True and "A B B B B" if -- merge_repeated = False. -- -- Regardless of the value of merge_repeated, if the maximum index of a -- given time and batch corresponds to the blank, index `(num_classes - -- 1)`, no new element is emitted. cTCGreedyDecoder :: Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) cTCGreedyDecoder' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) -- | Calculates the CTC Loss (log probability) for each batch entry. Also -- calculates -- -- the gradient. This op performs the softmax operation for you, so -- inputs should be, e.g., linear projections of the outputs of an LSTM. cTCLoss :: Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float) cTCLoss' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float) -- | Cast x of type SrcT to y of DstT. cast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT cast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT -- | Returns element-wise the smallest integer not less than x. ceil :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t ceil' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Checks a tensor for NaN and Inf values. -- -- When run, reports an InvalidArgument error if tensor -- has any values that are not a number (NaN) or infinity (Inf). -- Otherwise, passes tensor as-is. checkNumerics :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t checkNumerics' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes the Cholesky decomposition of one or more square matrices. -- -- The input is a tensor of shape `[..., M, M]` whose inner-most 2 -- dimensions form square matrices, with the same constraints as the -- single matrix Cholesky decomposition above.
The output is a tensor of -- the same shape as the input containing the Cholesky decompositions for -- all input submatrices `[..., :, :]`. cholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t cholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes the reverse mode backpropagated gradient of the Cholesky -- algorithm. -- -- For an explanation see "Differentiation of the Cholesky algorithm" by -- Iain Murray http://arxiv.org/abs/1602.07527. choleskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t choleskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Converts two real numbers to a complex number. -- -- Given a tensor real representing the real part of a complex -- number, and a tensor imag representing the imaginary part of a -- complex number, this operation returns complex numbers elementwise of -- the form \(a + bj\), where *a* represents the real part and *b* -- represents the imag part. -- -- The input tensors real and imag must have the same -- shape. -- -- For example: -- -- ``` # tensor real is [2.25, 3.25] # tensor imag is -- [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + -- 5.75j]] ``` complex :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout complex' :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout -- | Computes the complex absolute value of a tensor. -- -- Given a tensor x of complex numbers, this operation returns a -- tensor of type float or double that is the absolute -- value of each element in x. All elements in x must -- be complex numbers of the form \(a + bj\). The absolute value is -- computed as \(\sqrt{a^2 + b^2}\). complexAbs :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout complexAbs' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout -- | Computes the ids of the positions in sampled_candidates that match -- true_labels. -- -- When doing log-odds NCE, the result of this op should be passed -- through a SparseToDense op, then added to the logits of the sampled -- candidates. This has the effect of removing the sampled -- labels that match the true labels by making the classifier sure that -- they are sampled labels. computeAccidentalHits :: Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) computeAccidentalHits' :: OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) -- | Concatenates tensors along one dimension. concat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t concat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t -- | Computes offsets of concat inputs within its output. -- -- For example: -- -- ```prettyprint # x is [2, 2, 7] # y is [2, 3, 7] # -- z is [2, 5, 7] concat_offset(2, [x, y, z]) => [0, 0, 0], -- [0, 2, 0], [0, 5, 0] ``` concatOffset :: Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32] concatOffset' :: OpParams -> Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32] -- | Concatenates tensors along one dimension.
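--
-- For example (a sketch assuming TensorFlow.Core and TensorFlow.Ops
-- from the companion tensorflow package; the TFC alias is
-- illustrative):
--
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (scalar, vector)
-- > import qualified TensorFlow.GenOps.Core as TFC
-- >
-- > main :: IO ()
-- > main = do
-- >   -- Concatenate two vectors along axis 0.
-- >   r <- TF.runSession $ TF.run $
-- >     TFC.concatV2 [TF.vector [1, 2 :: Float], TF.vector [3, 4]]
-- >                  (TF.scalar (0 :: Int32))
-- >   print (r :: V.Vector Float)  -- [1.0, 2.0, 3.0, 4.0]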
concatV2 :: (TensorType t, OneOf '[Int32, Int64] tidx) => [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t concatV2' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t -- | A conditional accumulator for aggregating gradients. The accumulator -- accepts -- -- gradients marked with local_step greater or equal to the most recent -- global_step known to the accumulator. The average can be extracted -- from the accumulator, provided sufficient gradients have been -- accumulated. Extracting the average automatically resets the aggregate -- to 0, and increments the global_step recorded by the accumulator. conditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString) conditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString) -- | Returns the complex conjugate of a complex number. -- -- Given a tensor input of complex numbers, this operation -- returns a tensor of complex numbers that are the complex conjugate of -- each element in input. The complex numbers in input -- must be of the form \(a + bj\), where *a* is the real part and *b* is -- the imaginary part. -- -- The complex conjugate returned by this operation is of the form \(a - -- bj\). -- -- For example: -- -- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] -- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] ``` conj :: (OneOf '[Complex Double, Complex Float] t) => Tensor v'1 t -> Tensor Build t conj' :: (OneOf '[Complex Double, Complex Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns a constant tensor. const :: (TensorType dtype) => Tensor Build dtype const' :: (TensorType dtype) => OpParams -> Tensor Build dtype -- | Does nothing. Serves as a control trigger for scheduling. -- -- Only useful as a placeholder for control edges. controlTrigger :: (MonadBuild m') => m' (ControlNode) controlTrigger' :: (MonadBuild m') => OpParams -> m' (ControlNode) -- | Computes a 2-D convolution given 4-D input and filter -- tensors. -- -- Given an input tensor of shape `[batch, in_height, in_width, -- in_channels]` and a filter / kernel tensor of shape `[filter_height, -- filter_width, in_channels, out_channels]`, this op performs the -- following: -- --
-- 1. Flattens the filter to a 2-D matrix with shape `[filter_height * -- filter_width * in_channels, output_channels]`.
-- 2. Extracts image patches from the input tensor to form a *virtual* -- tensor of shape `[batch, out_height, out_width, filter_height * -- filter_width * in_channels]`.
-- 3. For each patch, right-multiplies the filter matrix and the image -- patch vector.
-- -- In detail, with the default NHWC format, -- -- output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, -- strides[2] * j + dj, q] * filter[di, dj, q, k] -- -- Must have `strides[0] = strides[3] = 1`. For the most common case of -- the same horizontal and vertical strides, `strides = [1, stride, -- stride, 1]`. conv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t conv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the gradients of convolution with respect to the filter. conv2DBackpropFilter :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t conv2DBackpropFilter' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -- | Computes the gradients of convolution with respect to the input. conv2DBackpropInput :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t conv2DBackpropInput' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Computes a 3-D convolution given 5-D input and filter -- tensors. -- -- In signal processing, cross-correlation is a measure of similarity of -- two waveforms as a function of a time-lag applied to one of them. This -- is also known as a sliding dot product or sliding inner-product. -- -- Our Conv3D implements a form of cross-correlation. conv3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t conv3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the gradients of 3-D convolution with respect to the filter. conv3DBackpropFilter :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t conv3DBackpropFilter' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Computes the gradients of 3-D convolution with respect to the filter. conv3DBackpropFilterV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t conv3DBackpropFilterV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -- | Computes the gradients of 3-D convolution with respect to the input. conv3DBackpropInput :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t conv3DBackpropInput' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Computes the gradients of 3-D convolution with respect to the input.
conv3DBackpropInputV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t conv3DBackpropInputV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Copy Op. -- -- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on -- the device on which the tensor is allocated. -- -- Unlike the CopyHost Op, this op does not have HostMemory constraint on -- its input or output. copy :: (TensorType t) => Tensor v'1 t -> Tensor Build t copy' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Copy Host Op. -- -- Performs CPU-to-CPU deep-copying of tensor. -- -- Unlike the Copy Op, this op has HostMemory constraint on its input or -- output. copyHost :: (TensorType t) => Tensor v'1 t -> Tensor Build t copyHost' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes cos of x element-wise. cos :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t cos' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Increments ref until it reaches limit. countUpTo :: (MonadBuild m', OneOf '[Int32, Int64] t) => Int64 -> Tensor Ref t -> m' (Tensor Value t) countUpTo' :: (MonadBuild m', OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor Ref t -> m' (Tensor Value t) -- | Extracts crops from the input image tensor and bilinearly resizes them -- (possibly -- -- with aspect ratio change) to a common output size specified by -- crop_size. This is more general than the -- crop_to_bounding_box op which extracts a fixed size slice -- from the input image and does not allow resizing or aspect ratio -- change. -- -- Returns a tensor with crops from the input image at -- positions defined at the bounding box locations in boxes. The -- cropped boxes are all resized (with bilinear interpolation) to a fixed -- `size = [crop_height, crop_width]`. The result is a 4-D tensor -- `[num_boxes, crop_height, crop_width, depth]`. cropAndResize :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float cropAndResize' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float -- | Computes the gradient of the crop_and_resize op wrt the input boxes -- tensor. cropAndResizeGradBoxes :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float cropAndResizeGradBoxes' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float -- | Computes the gradient of the crop_and_resize op wrt the input image -- tensor. 
cropAndResizeGradImage :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t cropAndResizeGradImage' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t -- | Compute the pairwise cross product. -- -- a and b must be the same shape; they can either be -- simple 3-element vectors, or any shape where the innermost dimension -- is 3. In the latter case, each pair of corresponding 3-element vectors -- is cross-multiplied independently. cross :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t cross' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Compute the cumulative product of the tensor x along -- axis. -- -- By default, this op performs an inclusive cumprod, which means that -- the first element of the input is identical to the first element of -- the output: ```prettyprint tf.cumprod([a, b, c]) ==> [a, a * b, a * -- b * c] ``` -- -- By setting the exclusive kwarg to True, an exclusive -- cumprod is performed instead: ```prettyprint tf.cumprod([a, b, c], -- exclusive=True) ==> [0, a, a * b] ``` -- -- By setting the reverse kwarg to True, the cumprod is -- performed in the opposite direction: ```prettyprint tf.cumprod([a, b, -- c], reverse=True) ==> [a * b * c, b * c, c] ``` This is more -- efficient than using separate `tf.reverse` ops. -- -- The reverse and exclusive kwargs can also be combined: -- ```prettyprint tf.cumprod([a, b, c], exclusive=True, reverse=True) -- ==> [b * c, c, 0] ``` cumprod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t cumprod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | Compute the cumulative sum of the tensor x along -- axis. -- -- By default, this op performs an inclusive cumsum, which means that the -- first element of the input is identical to the first element of the -- output: ```prettyprint tf.cumsum([a, b, c]) ==> [a, a + b, a + b + -- c] ``` -- -- By setting the exclusive kwarg to True, an exclusive -- cumsum is performed instead: ```prettyprint tf.cumsum([a, b, c], -- exclusive=True) ==> [0, a, a + b] ``` -- -- By setting the reverse kwarg to True, the cumsum is -- performed in the opposite direction: ```prettyprint tf.cumsum([a, b, -- c], reverse=True) ==> [a + b + c, b + c, c] ``` This is more -- efficient than using separate `tf.reverse` ops. -- -- The reverse and exclusive kwargs can also be combined: -- ```prettyprint tf.cumsum([a, b, c], exclusive=True, reverse=True) -- ==> [b + c, c, 0] ``` cumsum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t cumsum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
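-- For example: the exclusive and reverse kwargs described above map to
-- op attributes on the primed wrappers. A small sketch (same package
-- assumptions as the conv2D example earlier):
--
-- > {-# LANGUAGE OverloadedStrings #-}
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import Lens.Family2 ((.~))
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   out <- TF.runSession $ do
-- >     let xs   = TFO.vector [1, 2, 3 :: Float]
-- >         axis = TFO.scalar (0 :: Int32)
-- >     -- exclusive=True shifts the running sum right: [0, 1, 3].
-- >     TF.run (TFC.cumsum' (TF.opAttr "exclusive" .~ True) xs axis)
-- >   print (out :: V.Vector Float)
--
-- | Debug Identity Op.
--
-- Provides an identity mapping of the non-Ref type input tensor for
-- debugging.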
debugIdentity :: (TensorType t) => Tensor v'1 t -> Tensor Build t debugIdentity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Debug NaN Value Counter Op -- -- Counts number of NaNs in the input tensor, for debugging. debugNanCount :: (TensorType t) => Tensor v'1 t -> Tensor Build Int64 debugNanCount' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int64 -- | Debug Numeric Summary Op. -- -- Provide a basic summary of numeric value types, range and -- distribution. debugNumericSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build Double debugNumericSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Double -- | Decode web-safe base64-encoded strings. -- -- Input may or may not have padding at the end. See EncodeBase64 for -- padding. Web-safe means that input must use - and _ instead of + and -- /. decodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString decodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString -- | Convert CSV records to tensors. Each column maps to one tensor. -- -- RFC 4180 format is expected for the CSV records. -- (https://tools.ietf.org/html/rfc4180) Note that we allow leading -- and trailing spaces with int or float fields. decodeCSV :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE decodeCSV' :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => OpParams -> Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE -- | Decode the first frame of a GIF-encoded image to a uint8 tensor. -- -- GIFs with frame or transparency compression are not supported; convert -- animated GIFs from compressed to uncompressed with: -- -- convert $src.gif -coalesce $dst.gif decodeGif :: Tensor v'1 ByteString -> Tensor Build Word8 decodeGif' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8 -- | Convert JSON-encoded Example records to binary protocol buffer -- strings. -- -- This op translates a tensor containing Example records, encoded using -- the standard JSON mapping, into a tensor containing the same -- records encoded as binary protocol buffers. The resulting tensor can -- then be fed to any of the other Example-parsing ops. decodeJSONExample :: Tensor v'1 ByteString -> Tensor Build ByteString decodeJSONExample' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString -- | Decode a JPEG-encoded image to a uint8 tensor. -- -- The attr channels indicates the desired number of color -- channels for the decoded image. -- -- Accepted values are: -- -- -- -- If needed, the JPEG-encoded image is transformed to match the -- requested number of color channels. -- -- The attr ratio allows downscaling the image by an integer -- factor during decoding. Allowed values are: 1, 2, 4, and 8. This is -- much faster than downscaling the image later. decodeJpeg :: Tensor v'1 ByteString -> Tensor Build Word8 decodeJpeg' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8 -- | Decode a PNG-encoded image to a uint8 or uint16 tensor. -- -- The attr channels indicates the desired number of color -- channels for the decoded image. -- -- Accepted values are: -- -- -- -- If needed, the PNG-encoded image is transformed to match the requested -- number of color channels.
decodePng :: (OneOf '[Word16, Word8] dtype) => Tensor v'1 ByteString -> Tensor Build dtype decodePng' :: (OneOf '[Word16, Word8] dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype -- | Reinterpret the bytes of a string as a vector of numbers. decodeRaw :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type decodeRaw' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type -- | Delete the tensor specified by its handle in the session. deleteSessionTensor :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode) deleteSessionTensor' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode) -- | Applies set operation along last dimension of 2 Tensor inputs. -- -- See SetOperationOp::SetOperationFromContext for values of -- set_operation. -- -- Output result is a SparseTensor represented by -- result_indices, result_values, and -- result_shape. For set1 and set2 ranked -- n, this has rank n and the same 1st `n-1` dimensions -- as set1 and set2. The nth dimension -- contains the result of set_operation applied to the -- corresponding `[0...n-1]` dimension of set. denseToDenseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) denseToDenseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -- | Applies set operation along last dimension of Tensor and -- SparseTensor. -- -- See SetOperationOp::SetOperationFromContext for values of -- set_operation. -- -- Input set2 is a SparseTensor represented by -- set2_indices, set2_values, and set2_shape. -- For set2 ranked n, 1st `n-1` dimensions must be the -- same as set1. Dimension n contains values in a set, -- duplicates are allowed but ignored. -- -- If validate_indices is True, this op validates the -- order and range of set2 indices. -- -- Output result is a SparseTensor represented by -- result_indices, result_values, and -- result_shape. For set1 and set2 ranked -- n, this has rank n and the same 1st `n-1` dimensions -- as set1 and set2. The nth dimension -- contains the result of set_operation applied to the -- corresponding `[0...n-1]` dimension of set. denseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) denseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
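-- For example: decodeRaw above simply reinterprets bytes, so the
-- element type is chosen by an annotation on the result. A minimal
-- sketch (same package assumptions as the conv2D example earlier):
--
-- > import Data.Int (Int32)
-- > import qualified Data.ByteString.Char8 as B8
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   out <- TF.runSession $ do
-- >     -- Eight bytes holding 1 and 2 as little-endian 32-bit integers.
-- >     let raw = TFO.scalar (B8.pack "\1\0\0\0\2\0\0\0")
-- >     TF.run (TFC.decodeRaw raw :: TF.Tensor TF.Build Int32)
-- >   print (out :: V.Vector Int32)  -- [1,2]
--
-- | DepthToSpace for tensors of type T.
--
-- Rearranges data from depth into blocks of spatial data. This is the
-- reverse transformation of SpaceToDepth. More specifically, this op
-- outputs a copy of the input tensor where values from the
-- depth dimension are moved in spatial blocks to the
-- height and width dimensions. The attr
-- block_size indicates the input block size and how the data is
-- moved.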
-- -- -- -- That is, assuming the input is in the shape: `[batch, height, width, -- depth]`, the shape of the output will be: `[batch, height*block_size, -- width*block_size, depth/(block_size*block_size)]` -- -- This operation requires that the input tensor be of rank 4, and that -- block_size be >=1 and that `block_size * block_size` be a -- divisor of the input depth. -- -- This operation is useful for resizing the activations between -- convolutions (but keeping all data), e.g. instead of pooling. It is -- also useful for training purely convolutional models. -- -- For example, given this input of shape `[1, 1, 1, 4]`, and a block -- size of 2: -- -- ```prettyprint x = [[[[1, 2, 3, 4]]]] -- -- ``` -- -- This operation will output a tensor of shape `[1, 2, 2, 1]`: -- -- ```prettyprint [[[[1], [2]], [[3], [4]]]] ``` -- -- Here, the input has a batch of 1 and each batch element has shape `[1, -- 1, 4]`, the corresponding output will have 2x2 elements and will have -- a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output -- element shape is `[2, 2, 1]`. -- -- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, -- e.g. -- -- ```prettyprint x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ``` -- -- This operation, for block size of 2, will return the following tensor -- of shape `[1, 2, 2, 3]` -- -- ```prettyprint [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] -- -- ``` -- -- Similarly, for the following input of shape `[1 2 2 4]`, and a block -- size of 2: -- -- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], -- [13, 14, 15, 16]]]] ``` -- -- the operator will return the following tensor of shape `[1 4 4 1]`: -- -- ```prettyprint x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [ -- [9], [10], [13], [14]], [ [11], [12], [15], [16]]] -- -- ``` depthToSpace :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t depthToSpace' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t -- | Computes a 2-D depthwise convolution given 4-D input and -- filter tensors. -- -- Given an input tensor of shape `[batch, in_height, in_width, -- in_channels]` and a filter / kernel tensor of shape `[filter_height, -- filter_width, in_channels, channel_multiplier]`, containing -- in_channels convolutional filters of depth 1, -- depthwise_conv2d applies a different filter to each input -- channel (expanding from 1 channel to channel_multiplier -- channels for each), then concatenates the results together. Thus, the -- output has `in_channels * channel_multiplier` channels. -- -- for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, -- i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * -- i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] -- -- Must have `strides[0] = strides[3] = 1`. For the most common case of -- the same horizontal and vertical strides, `strides = [1, stride, -- stride, 1]`. depthwiseConv2dNative :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t depthwiseConv2dNative' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the gradients of depthwise convolution with respect to the -- filter.
depthwiseConv2dNativeBackpropFilter :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t depthwiseConv2dNativeBackpropFilter' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -- | Computes the gradients of depthwise convolution with respect to the -- input. depthwiseConv2dNativeBackpropInput :: (OneOf '[Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t depthwiseConv2dNativeBackpropInput' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Dequantize the input tensor into a float Tensor. -- -- -- -- In MIN_COMBINED mode, each value of the tensor will undergo -- the following: -- -- ``` if T == qint8, in[i] += (range(T) + 1)/ 2.0 out[i] = min_range + -- (in[i]* (max_range - min_range) / range(T)) ``` here `range(T) = -- numeric_limits<T>::max() - numeric_limits<T>::min()` -- -- -- -- If the input comes from a QuantizedRelu6, the output type is quint8 -- (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The -- min_range and max_range values are therefore 0.0 and 6.0. Dequantize -- on quint8 will take each value, cast to float, and multiply by 6 / -- 255. Note that if the quantized type is qint8, the operation will -- additionally add 128 to each value prior to casting. -- -- If the mode is MIN_FIRST, then this approach is used: -- -- ``` number_of_steps = 1 << (# of bits in T) range_adjust = -- number_of_steps / (number_of_steps - 1) range = (range_max - -- range_min) * range_adjust range_scale = range / number_of_steps const -- double offset_input = static_cast<double>(input) - -- lowest_quantized; result = range_min + ((input - -- numeric_limits<T>::min()) * range_scale) ``` dequantize :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float dequantize' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float -- | Deserialize and concatenate SparseTensors from a serialized -- minibatch. -- -- The input serialized_sparse must be a string matrix of shape -- `[N x 3]` where N is the minibatch size and the rows -- correspond to packed outputs of SerializeSparse. The ranks of -- the original SparseTensor objects must all match. When the -- final SparseTensor is created, it has rank one higher than -- the ranks of the incoming SparseTensor objects (they have -- been concatenated along a new row dimension). -- -- The output SparseTensor object's shape values for all -- dimensions but the first are the max across the input -- SparseTensor objects' shape values for the corresponding -- dimensions. Its first shape value is N, the minibatch size. -- -- The input SparseTensor objects' indices are assumed ordered -- in standard lexicographic order. If this is not the case, after this -- step run SparseReorder to restore index ordering.
-- -- For example, if the serialized input is a `[2 x 3]` matrix -- representing two original SparseTensor objects: -- -- index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] -- -- and -- -- index = [ 2] [10] values = [4, 5] shape = [30] -- -- then the final deserialized SparseTensor will be: -- -- index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] -- shape = [2 50] deserializeManySparse :: (TensorType dtype) => Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64) deserializeManySparse' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64) -- | Destroys the temporary variable and returns its final value. -- -- Sets output to the value of the Tensor pointed to by ref, -- then destroys the temporary variable called var_name. All -- other uses of ref *must* have executed before this op. This -- is typically achieved by chaining the ref through each assign op, or -- by using control dependencies. -- -- Outputs the final value of the tensor pointed to by ref. destroyTemporaryVariable :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Value t) destroyTemporaryVariable' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Value t) -- | Returns a diagonal tensor with given diagonal values. -- -- Given a diagonal, this operation returns a tensor with the -- diagonal and everything else padded with zeros. The diagonal -- is computed as follows: -- -- Assume diagonal has dimensions [D1,..., Dk], then the output -- is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: -- -- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 -- everywhere else. -- -- For example: -- -- ```prettyprint # diagonal is [1, 2, 3, 4] tf.diag(diagonal) -- ==> [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] ``` diag :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t diag' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns the diagonal part of the tensor. -- -- This operation returns a tensor with the diagonal part of the -- input. The diagonal part is computed as follows: -- -- Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then -- the output is a tensor of rank k with dimensions `[D1,..., -- Dk]` where: -- -- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. -- -- For example: -- -- ```prettyprint # input is [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, -- 3, 0] [0, 0, 0, 4]] -- -- tf.diag_part(input) ==> [1, 2, 3, 4] ``` diagPart :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t diagPart' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes Psi, the derivative of Lgamma (the log of the absolute value -- of -- -- `Gamma(x)`), element-wise. digamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t digamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
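-- For example: the diag example above, as runnable code (same package
-- assumptions as the conv2D sketch earlier):
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   out <- TF.runSession $
-- >     -- A 4x4 matrix with [1,2,3,4] on the diagonal, fetched
-- >     -- flattened in row-major order (16 elements).
-- >     TF.run (TFC.diag (TFO.vector [1, 2, 3, 4 :: Float]))
-- >   print (out :: V.Vector Float)
--
-- | Computes the grayscale dilation of 4-D input and 3-D
-- filter tensors.
--
-- The input tensor has shape `[batch, in_height, in_width,
-- depth]` and the filter tensor has shape `[filter_height,
-- filter_width, depth]`, i.e., each input channel is processed
-- independently of the others with its own structuring function.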
The -- output tensor has shape `[batch, out_height, out_width, -- depth]`. The spatial dimensions of the output tensor depend on the -- padding algorithm. We currently only support the default -- NHWC data_format. -- -- In detail, the grayscale morphological 2-D dilation is the max-sum -- correlation (for consistency with conv2d, we use unmirrored -- filters): -- -- output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * -- dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] -- -- Max-pooling is a special case when the filter has size equal to the -- pooling kernel size and contains all zeros. -- -- Note on duality: The dilation of input by the filter -- is equal to the negation of the erosion of `-input` by the reflected -- filter. dilation2D :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t dilation2D' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the gradient of morphological 2-D dilation with respect to -- the filter. dilation2DBackpropFilter :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t dilation2DBackpropFilter' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Computes the gradient of morphological 2-D dilation with respect to -- the input. dilation2DBackpropInput :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t dilation2DBackpropInput' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Returns x / y element-wise. -- -- div :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t div' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Draw bounding boxes on a batch of images. -- -- Outputs a copy of images but draws on top of the pixels zero -- or more bounding boxes specified by the locations in boxes. -- The coordinates of each bounding box in boxes are encoded -- as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are -- floats in `[0.0, 1.0]` relative to the width and height of the -- underlying image. -- -- For example, if an image is 100 x 200 pixels and the bounding box is -- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of -- the bounding box will be `(10, 40)` to `(50, 180)`. -- -- Parts of the bounding box may fall outside the image. drawBoundingBoxes :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t drawBoundingBoxes' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t -- | Partitions `data` into num_partitions tensors using indices -- from partitions. -- -- For each index tuple js of size `partitions.ndim`, the slice -- `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices -- with `partitions[js] = i` are placed in `outputs[i]` in lexicographic -- order of js, and the first dimension of `outputs[i]` is the -- number of entries in partitions equal to i.
In -- detail, -- -- ```python outputs[i].shape = [sum(partitions == i)] + -- data.shape[partitions.ndim:] -- -- outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) ``` -- -- `data.shape` must start with `partitions.shape`. -- -- For example: -- -- ```python # Scalar partitions. partitions = 1 num_partitions = 2 data -- = [10, 20] outputs[0] = [] # Empty with shape [0, 2] outputs[1] = -- [[10, 20]] -- -- # Vector partitions. partitions = [0, 0, 1, 1, 0] num_partitions = 2 -- data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] = -- [30, 40] ``` -- -- (figure: DynamicPartition.png) dynamicPartition :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t] dynamicPartition' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t] -- | Interleave the values from the `data` tensors into a single tensor. -- -- Builds a merged tensor such that -- -- ```python merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] -- ``` -- -- For example, if each `indices[m]` is scalar or vector, we have -- -- ```python # Scalar indices: merged[indices[m], ...] = data[m][...] -- -- # Vector indices: merged[indices[m][i], ...] = data[m][i, ...] ``` -- -- Each `data[i].shape` must start with the corresponding -- `indices[i].shape`, and the rest of `data[i].shape` must be constant -- w.r.t. i. That is, we must have `data[i].shape = -- indices[i].shape + constant`. In terms of this constant, the -- output shape is -- -- merged.shape = [max(indices)] + constant -- -- Values are merged in order, so if an index appears in both -- `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice -- `data[n][j]` will appear in the merged result. -- -- For example: -- -- ```python indices[0] = 6 indices[1] = [4, 1] indices[2] = [[5, 2], [0, -- 3]] data[0] = [61, 62] data[1] = [[41, 42], [11, 12]] data[2] = [[[51, -- 52], [21, 22]], [[1, 2], [31, 32]]] merged = [[1, 2], [11, 12], [21, -- 22], [31, 32], [41, 42], [51, 52], [61, 62]] ``` -- -- (figure: DynamicStitch.png) dynamicStitch :: (TensorType t) => [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t dynamicStitch' :: (TensorType t) => OpParams -> [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t -- | Computes the (possibly normalized) Levenshtein Edit Distance. -- -- The inputs are variable-length sequences provided by SparseTensors -- (hypothesis_indices, hypothesis_values, hypothesis_shape) and -- (truth_indices, truth_values, truth_shape). -- -- The inputs are: editDistance :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float editDistance' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float
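-- For example: the vector-partitions case above, as runnable code. The
-- generated wrapper returns a Haskell list of output tensors, fetched
-- here as a pair (same package assumptions as the conv2D sketch
-- earlier):
--
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   (p0, p1) <- TF.runSession $ do
-- >     let xs    = TFO.vector [10, 20, 30, 40, 50 :: Float]
-- >         parts = TFO.vector [0, 0, 1, 1, 0 :: Int32]
-- >     case TFC.dynamicPartition 2 xs parts of
-- >       [out0, out1] -> TF.run (out0, out1)
-- >       _            -> error "expected exactly 2 partitions"
-- >   print (p0 :: V.Vector Float)  -- [10.0,20.0,50.0]
-- >   print (p1 :: V.Vector Float)  -- [30.0,40.0]
--
-- | Computes exponential linear: `exp(features) - 1` if < 0,
-- features otherwise.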
-- -- See Fast and Accurate Deep Network Learning by Exponential Linear -- Units (ELUs) elu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t elu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes gradients for the exponential linear (Elu) operation. eluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t eluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Encode strings into web-safe base64 format. -- -- Refer to the following article for more information on base64 format: -- https://en.wikipedia.org/wiki/Base64. Base64 strings may have padding -- with '=' at the end so that the encoded string has a length that is a -- multiple of 4. See the Padding section of the link above. -- -- Web-safe means that the encoder uses - and _ instead of + and /. encodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString encodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString -- | JPEG-encode an image. -- -- image is a 3-D uint8 Tensor of shape `[height, width, -- channels]`. -- -- The attr format can be used to override the color format of -- the encoded output. Values can be: -- -- -- -- If format is not specified or is the empty string, a default -- format is picked as a function of the number of channels in -- image: -- -- encodeJpeg :: Tensor v'1 Word8 -> Tensor Build ByteString encodeJpeg' :: OpParams -> Tensor v'1 Word8 -> Tensor Build ByteString -- | PNG-encode an image. -- -- image is a 3-D uint8 or uint16 Tensor of shape `[height, -- width, channels]` where channels is: -- -- -- -- The ZLIB compression level, compression, can be -1 for the -- PNG-encoder default or a value from 0 to 9. 9 is the highest -- compression level, generating the smallest output, but is slower. encodePng :: (OneOf '[Word16, Word8] t) => Tensor v'1 t -> Tensor Build ByteString encodePng' :: (OneOf '[Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString -- | Creates or finds a child frame, and makes `data` available to the -- child frame. -- -- This op is used together with Exit to create loops in the -- graph. The unique frame_name is used by the Executor -- to identify frames. If is_constant is true, output -- is a constant in the child frame; otherwise it may be changed in the -- child frame. At most parallel_iterations iterations are run -- in parallel in the child frame. enter :: (TensorType t) => Tensor v'1 t -> Tensor Build t enter' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns the truth value of (x == y) element-wise. -- -- equal :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool equal' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -- | Computes the Gauss error function of x element-wise. erf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t erf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
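-- For example: a web-safe base64 round trip through encodeBase64 and
-- decodeBase64 (same package assumptions as the conv2D sketch
-- earlier):
--
-- > import qualified Data.ByteString.Char8 as B8
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   out <- TF.runSession $ do
-- >     let s = TFO.scalar (B8.pack "hello")
-- >     -- Encoding then decoding should give back the input bytes.
-- >     TF.run (TFC.decodeBase64 (TFC.encodeBase64 s))
-- >   print (out :: V.Vector B8.ByteString)  -- ["hello"]
--
-- | Computes the complementary error function of x element-wise.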
erfc :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t erfc' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Exits the current frame to its parent frame. -- -- Exit makes its input `data` available to the parent frame. exit :: (TensorType t) => Tensor v'1 t -> Tensor Build t exit' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes exponential of x element-wise. \(y = e^x\). exp :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t exp' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Inserts a dimension of 1 into a tensor's shape. -- -- Given a tensor input, this operation inserts a dimension of 1 -- at the dimension index dim of input's shape. The -- dimension index dim starts at zero; if you specify a negative -- number for dim it is counted backward from the end. -- -- This operation is useful if you want to add a batch dimension to a -- single element. For example, if you have a single image of shape -- `[height, width, channels]`, you can make it a batch of 1 image with -- `expand_dims(image, 0)`, which will make the shape `[1, height, width, -- channels]`. -- -- Other examples: -- -- ```prettyprint # t is a tensor of shape [2] -- shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> -- [2, 1] shape(expand_dims(t, -1)) ==> [2, 1] -- -- # t2 is a tensor of shape [2, 3, 5] shape(expand_dims(t2, 0)) -- ==> [1, 2, 3, 5] shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] -- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] ``` -- -- This operation requires that: -- -- `-1-input.dims() <= dim <= input.dims()` -- -- This operation is related to `squeeze()`, which removes dimensions of -- size 1. expandDims :: (TensorType t, OneOf '[Int32, Int64] tdim) => Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t expandDims' :: (TensorType t, OneOf '[Int32, Int64] tdim) => OpParams -> Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t -- | Computes exponential of x - 1 element-wise. -- -- I.e., \(y = (exp x) - 1\). expm1 :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t expm1' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Extracts a glimpse from the input tensor. -- -- Returns a set of windows called glimpses extracted at location -- offsets from the input tensor. If the windows only partially -- overlap the inputs, the non-overlapping areas will be filled with -- random noise. -- -- The result is a 4-D tensor of shape `[batch_size, glimpse_height, -- glimpse_width, channels]`. The channels and batch dimensions are the -- same as that of the input tensor. The height and width of the output -- windows are specified in the size parameter. -- -- The arguments normalized and centered control how -- the windows are built: -- -- extractGlimpse :: Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float extractGlimpse' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float
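-- For example: checking the shape arithmetic of expandDims with the
-- generated shape op from this module (same package assumptions as the
-- conv2D sketch earlier):
--
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   out <- TF.runSession $ do
-- >     -- A [2, 3] tensor gains a leading batch dimension of 1.
-- >     let t       = TFO.constant (TF.Shape [2, 3]) [1 .. 6 :: Float]
-- >         batched = TFC.expandDims t (TFO.scalar (0 :: Int32))
-- >     TF.run (TFC.shape batched :: TF.Tensor TF.Build Int32)
-- >   print (out :: V.Vector Int32)  -- [1,2,3]
--
-- | Extract patches from images and put them in the
-- "depth" output dimension.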
extractImagePatches :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t extractImagePatches' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Compute the 1-dimensional discrete Fourier Transform over the -- inner-most -- -- dimension of input. fFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) fFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -- | Compute the 2-dimensional discrete Fourier Transform over the -- inner-most -- -- 2 dimensions of input. fFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) fFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -- | Compute the 3-dimensional discrete Fourier Transform over the -- inner-most 3 -- -- dimensions of input. fFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) fFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -- | A queue that produces elements in first-in first-out order. fIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) fIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) -- | A queue that produces elements in first-in first-out order. fIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) fIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) -- | Output a fact about factorials. fact :: Tensor Build ByteString fact' :: OpParams -> Tensor Build ByteString -- | Fake-quantize the inputs tensor, type float to -- outputs tensor of same type. -- -- Attributes [min; max] define the clamping range for the -- inputs data. Op divides this range into 255 steps (total of -- 256 values), then replaces each inputs value with the closest -- of the quantized step values. -- -- Quantization is called fake since the output is still in floating -- point. fakeQuantWithMinMaxArgs :: Tensor v'1 Float -> Tensor Build Float fakeQuantWithMinMaxArgs' :: OpParams -> Tensor v'1 Float -> Tensor Build Float -- | Compute gradients for a FakeQuantWithMinMaxArgs operation. fakeQuantWithMinMaxArgsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float fakeQuantWithMinMaxArgsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float -- | Fake-quantize the inputs tensor of type float and shape `[b, -- h, w, d]` via -- -- global float scalars min and max to outputs -- tensor of same shape as inputs. -- -- -- -- This operation has a gradient and thus allows for training min -- and max values. fakeQuantWithMinMaxVars :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float fakeQuantWithMinMaxVars' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float -- | Compute gradients for a FakeQuantWithMinMaxVars operation. fakeQuantWithMinMaxVarsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) fakeQuantWithMinMaxVarsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- | Fake-quantize the inputs tensor of type float and one of the -- shapes: `[d]`, -- -- `[b, d]` `[b, h, w, d]` via per-channel floats min and -- max of shape `[d]` to outputs tensor of same shape as -- inputs. 
-- -- -- -- This operation has a gradient and thus allows for training min -- and max values. fakeQuantWithMinMaxVarsPerChannel :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float fakeQuantWithMinMaxVarsPerChannel' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float -- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- | Deprecated. Do not use. fakeQueue :: (MonadBuild m') => ResourceHandle -> m' (Tensor Ref ByteString) fakeQueue' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Ref ByteString) -- | Creates a tensor filled with a scalar value. -- -- This operation creates a tensor of shape dims and fills it -- with value. -- -- For example: -- -- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) -- ==> [[9, 9, 9] [9, 9, 9]] ``` fill :: (TensorType t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t fill' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t -- | A Reader that outputs fixed-length records from a file. fixedLengthRecordReader :: (MonadBuild m') => Int64 -> m' (Tensor Ref ByteString) fixedLengthRecordReader' :: (MonadBuild m') => OpParams -> Int64 -> m' (Tensor Ref ByteString) -- | A Reader that outputs fixed-length records from a file. fixedLengthRecordReaderV2 :: (MonadBuild m') => Int64 -> m' (ResourceHandle) fixedLengthRecordReaderV2' :: (MonadBuild m') => OpParams -> Int64 -> m' (ResourceHandle) -- | Generates labels for candidate sampling with a learned unigram -- distribution. -- -- A unigram sampler could use a fixed unigram distribution read from a -- file or passed in as an in-memory array instead of building up the -- distribution from data on the fly. There is also an option to skew the -- distribution by applying a distortion power to the weights. -- -- The vocabulary file should be in CSV-like format, with the last field -- being the weight associated with the word. -- -- For each batch, this op picks a single set of sampled candidate -- labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage -- is that the sampled candidates must be chosen independently of the -- context and of the true labels. fixedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) fixedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -- | Returns element-wise largest integer not greater than x. floor :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t floor' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
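-- For example: the fill example above, as runnable code (same package
-- assumptions as the conv2D sketch earlier):
--
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   out <- TF.runSession $
-- >     -- A [2, 3] tensor of 9s, fetched flattened in row-major order.
-- >     TF.run (TFC.fill (TFO.vector [2, 3 :: Int32]) (TFO.scalar (9 :: Float)))
-- >   print (out :: V.Vector Float)  -- [9.0,9.0,9.0,9.0,9.0,9.0]
--
-- | Returns x // y element-wise.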
-- -- floorDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t floorDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Returns element-wise remainder of division. When `x < 0` xor `y -- < 0` is -- -- true, this follows Python semantics in that the result here is -- consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) -- = x`. -- -- floorMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t floorMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Performs fractional average pooling on the input. -- -- Fractional average pooling is similar to Fractional max pooling in the -- pooling region generation step. The only difference is that after -- pooling regions are generated, a mean operation is performed instead -- of a max operation in each pooling region. fractionalAvgPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) fractionalAvgPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) -- | Computes gradient of the FractionalAvgPool function. -- -- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for -- FractionalAvgPoolGrad, we just need to evenly back-propagate each -- element of out_backprop to those indices that form the same pooling -- cell. Therefore, we just need to know the shape of original input -- tensor, instead of the whole tensor. fractionalAvgPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t fractionalAvgPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t -- | Performs fractional max pooling on the input. -- -- Fractional max pooling is slightly different than regular max pooling. -- In regular max pooling, you downsize an input set by taking the -- maximum value of smaller N x N subsections of the set (often 2x2), and -- try to reduce the set by a factor of N, where N is an integer. -- Fractional max pooling, as you might expect from the word -- "fractional", means that the overall reduction ratio N does not have -- to be an integer. -- -- The sizes of the pooling regions are generated randomly but are fairly -- uniform. For example, let's look at the height dimension, and the -- constraints on the list of rows that will be pool boundaries. -- -- First we define the following: -- --
--
--   1. input_row_length : the number of rows from the input set
--   2. output_row_length : which will be smaller than the input
--   3. alpha = input_row_length / output_row_length : our reduction ratio
--   4. K = floor(alpha)
--   5. row_pooling_sequence : this is the result list of pool boundary rows
--
-- Then, row_pooling_sequence should satisfy:
--
--   1. a[0] = 0 : the first value of the sequence is 0
--   2. a[end] = input_row_length : the last value of the sequence is the size
--   3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
--   4. length(row_pooling_sequence) = output_row_length+1
-- -- For more details on fractional max pooling, see this paper: -- Benjamin Graham, Fractional Max-Pooling fractionalMaxPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) fractionalMaxPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) -- | Computes gradient of the FractionalMaxPool function. fractionalMaxPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t fractionalMaxPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t -- | Batch normalization. -- -- Note that the size of 4D Tensors are defined by either NHWC or -- NCHW. The size of 1D Tensors matches the dimension C of the 4D -- Tensors. fusedBatchNorm :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) fusedBatchNorm' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- | Gradient for batch normalization. -- -- Note that the size of 4D Tensors are defined by either NHWC or -- NCHW. The size of 1D Tensors matches the dimension C of the 4D -- Tensors. fusedBatchNormGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) fusedBatchNormGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- | Performs a padding as a preprocess during a convolution. -- -- Similar to FusedResizeAndPadConv2d, this op allows for an optimized -- implementation where the spatial padding transformation stage is fused -- with the im2col lookup, but in this case without the bilinear -- filtering required for resizing. Fusing the padding prevents the need -- to write out the intermediate results as whole tensors, reducing -- memory pressure, and we can get some latency gains by merging the -- transformation calculations. The data_format attribute for Conv2D -- isn't supported by this op, and NHWC order is used instead. -- Internally this op uses a single per-graph scratch buffer, which means -- that it will block if multiple versions are being run in parallel. -- This is because this operator is primarily an optimization to minimize -- memory usage. fusedPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t fusedPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t -- | Performs a resize and padding as a preprocess during a convolution. 
-- -- It's often possible to do spatial transformations more efficiently as -- part of the packing stage of a convolution, so this op allows for an -- optimized implementation where these stages are fused together. This -- prevents the need to write out the intermediate results as whole -- tensors, reducing memory pressure, and we can get some latency gains -- by merging the transformation calculations. The data_format attribute -- for Conv2D isn't supported by this op, and defaults to NHWC -- order. Internally this op uses a single per-graph scratch buffer, -- which means that it will block if multiple versions are being run in -- parallel. This is because this operator is primarily an optimization -- to minimize memory usage. fusedResizeAndPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t fusedResizeAndPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t -- | Gather slices from params according to indices. -- -- indices must be an integer tensor of any dimension (usually -- 0-D or 1-D). Produces an output tensor with shape `indices.shape + -- params.shape[1:]` where: -- -- ```python # Scalar indices output[:, ..., :] = params[indices, :, ... -- :] -- -- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] -- -- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, -- ..., j], :, ..., :] ``` -- -- If indices is a permutation and `len(indices) == -- params.shape[0]` then this operation will permute params -- accordingly. -- -- (figure: Gather.png) gather :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams gather' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams -- | Gather values or slices from params according to -- indices. -- -- params is a Tensor of rank P and indices is -- a Tensor of rank Q. -- -- indices must be an integer tensor, containing indices into -- params. It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 -- < K <= P`. -- -- The innermost dimension of indices (with length K) -- corresponds to indices into elements (if `K = P`) or slices (if `K -- < P`) along the Kth dimension of params. -- -- Produces an output tensor with shape -- -- ``` [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. ``` -- -- Some examples below.
-- -- Simple indexing into a matrix: -- -- ```python indices = [[0, 0], [1, 1]] params = [[a, -- b], [c, d]] output = [a, -- d] ``` -- -- Slice indexing into a matrix: -- -- ```python indices = [[1], [0]] params = [[a, b], -- [c, d]] output = [[c, d], -- [a, b]] ``` -- -- Indexing into a 3-tensor: -- -- ```python indices = [[1]] params = [[[a0, b0], -- [c0, d0]], [[a1, b1], -- [c1, d1]]] output = [[[a1, b1], -- [c1, d1]]] -- -- indices = [[0, 1], [1, 0]] params = [[[a0, b0], -- [c0, d0]], [[a1, b1], -- [c1, d1]]] output = [[c0, d0], -- [a1, b1]] -- -- indices = [[0, 0, 1], [1, 0, 1]] params = [[[a0, -- b0], [c0, d0]], [[a1, -- b1], [c1, d1]]] output = [b0, -- b1] ``` -- -- Batched indexing into a matrix: -- -- ```python indices = [[[0, 0]], [[0, 1]]] params = [[a, -- b], [c, d]] output = [[a], -- [b]] ``` -- -- Batched slice indexing into a matrix: -- -- ```python indices = [[[1]], [[0]]] params = [[a, b], -- [c, d]] output = [[[c, d]], -- [[a, b]]] ``` -- -- Batched indexing into a 3-tensor: -- -- ```python indices = [[[1]], [[0]]] params = [[[a0, -- b0], [c0, d0]], [[a1, -- b1], [c1, d1]]] output = [[[[a1, -- b1], [c1, d1]]], [[[a0, -- b0], [c0, d0]]]] -- -- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = -- [[[a0, b0], [c0, d0]], -- [[a1, b1], [c1, d1]]] output = -- [[[c0, d0], [a1, b1]], -- [[a0, b0], [c1, d1]]] -- -- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = -- [[[a0, b0], [c0, d0]], -- [[a1, b1], [c1, d1]]] output = -- [[b0, b1], [d0, c1]] ``` gatherNd :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams gatherNd' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams -- | Store the input tensor in the state of the current session. getSessionHandle :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString getSessionHandle' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString -- | Get the value of the tensor specified by its handle. getSessionTensor :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor Build dtype getSessionTensor' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype -- | Returns the truth value of (x > y) element-wise. -- -- greater :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool greater' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -- | Returns the truth value of (x >= y) element-wise. -- -- greaterEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool greaterEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -- | Convert one or more images from HSV to RGB. -- -- Outputs a tensor of the same shape as the images tensor, -- containing the RGB value of the pixels. The output is only well -- defined if the values in images are in `[0,1]`. -- -- See rgb_to_hsv for a description of the HSV encoding. hSVToRGB :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t hSVToRGB' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
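-- For example: gather (documented above) with vector indices, which
-- also demonstrates the permutation note (same package assumptions as
-- the conv2D sketch earlier):
--
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as TFC
-- > import qualified TensorFlow.Ops as TFO
-- >
-- > main :: IO ()
-- > main = do
-- >   out <- TF.runSession $ do
-- >     let params = TFO.vector [10, 11, 12, 13 :: Float]
-- >         idxs   = TFO.vector [3, 0, 1 :: Int32]
-- >     -- output[i] = params[idxs[i]]
-- >     TF.run (TFC.gather params idxs)
-- >   print (out :: V.Vector Float)  -- [13.0,10.0,11.0]
--
-- | Creates a non-initialized hash table.
--
-- This op creates a hash table, specifying the type of its keys and
-- values.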
Before using the table you will have to initialize it. After -- initialization the table will be immutable. hashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) hashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) -- | Outputs a Summary protocol buffer with a histogram. -- -- The generated `Summary` has one summary value containing a -- histogram for values. -- -- This op reports an InvalidArgument error if any value is not -- finite. histogramSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString histogramSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString -- | Compute the inverse 1-dimensional discrete Fourier Transform over the -- inner-most -- -- dimension of input. iFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) iFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -- | Compute the inverse 2-dimensional discrete Fourier Transform over the -- inner-most -- -- 2 dimensions of input. iFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) iFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -- | Compute the inverse 3-dimensional discrete Fourier Transform over the -- inner-most -- -- 3 dimensions of input. iFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) iFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) -- | Return a tensor with the same shape and contents as the input tensor -- or value. identity :: (TensorType t) => Tensor v'1 t -> Tensor Build t identity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | A Reader that outputs the queued work as both the key and value. -- -- To use, enqueue strings in a Queue. ReaderRead will take the front -- work string and output (work, work). identityReader :: (MonadBuild m') => m' (Tensor Ref ByteString) identityReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) -- | A Reader that outputs the queued work as both the key and value. -- -- To use, enqueue strings in a Queue. ReaderRead will take the front -- work string and output (work, work). identityReaderV2 :: (MonadBuild m') => m' (ResourceHandle) identityReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) -- | Compute the lower regularized incomplete Gamma function `P(a, x)`. -- -- The lower regularized incomplete Gamma function is defined as: -- -- ``` P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) ``` where ``` -- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt ``` is the lower -- incomplete Gamma function. -- -- Note, above `Q(a, x)` (Igammac) is the upper regularized -- incomplete Gamma function. igamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t igamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Compute the upper regularized incomplete Gamma function `Q(a, x)`. -- -- The upper regularized incomplete Gamma function is defined as: -- -- ``` Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) ``` where ``` -- Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt ``` is the upper -- incomplete Gamma function. -- -- Note, above `P(a, x)` (Igamma) is the lower regularized -- incomplete Gamma function.
igammac :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t igammac' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Returns the imaginary part of a complex number. -- -- Given a tensor input of complex numbers, this operation -- returns a tensor of type float that is the imaginary part of -- each element in input. All elements in input must be -- complex numbers of the form \(a + bj\), where *a* is the real part and -- *b* is the imaginary part returned by this operation. -- -- For example: -- -- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] -- tf.imag(input) ==> [4.75, 5.75] ``` imag :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout imag' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout -- | Outputs a Summary protocol buffer with images. -- -- The summary has up to max_images summary values containing -- images. The images are built from tensor which must be 4-D -- with shape `[batch_size, height, width, channels]` and where -- channels can be 1 (tensor is interpreted as grayscale), 3 -- (interpreted as RGB), or 4 (interpreted as RGBA). -- -- The images have the same number of channels as the input tensor. For -- float input, the values are normalized one image at a time to fit in -- the range `[0, 255]`. uint8 values are unchanged. The op uses -- two different normalization algorithms: if the input values are all -- positive, they are rescaled so the largest one is 255; if any input -- value is negative, the values are shifted so input value 0.0 is at -- 127, then rescaled so that either the smallest value is 0 or the -- largest one is 255. -- -- The tag argument is a scalar Tensor of type -- string. It is used to build the tag of the summary -- values: if max_images is 1, the summary value tag is '*tag*/image'; -- if max_images is greater than 1, the summary value tags are generated -- sequentially as '*tag*/image/0', '*tag*/image/1', etc. -- -- The bad_color argument is the color to use in the generated -- images for non-finite input values. It is a uint8 1-D tensor -- of length channels. Each element must be in the range `[0, -- 255]` (It represents the value of a pixel in the output image). -- Non-finite values in the input tensor are replaced by this tensor in -- the output image. The default value is the color red. imageSummary :: (OneOf '[Word16, Word8, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString imageSummary' :: (OneOf '[Word16, Word8, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString -- | Returns immutable tensor from memory region. -- -- The current implementation memmaps the tensor from a file. immutableConst :: (TensorType dtype) => Shape -> Tensor Build dtype immutableConst' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype -- | Says whether the targets are in the top K predictions. -- -- This outputs a batch_size bool array; an entry `out[i]` is -- true if the prediction for the target class is among the top -- k predictions among all predictions for example i. -- Note that the behavior of InTopK differs from the -- TopK op in its handling of ties; if multiple classes have the -- same prediction value and straddle the top-k boundary, all of -- those classes are considered to be in the top k. -- -- More formally, let -- -- \(predictions_i\) be the predictions for all classes for example -- i, \(targets_i\) be the target class for example i, -- \(out_i\) be the output for example i, -- -- $$out_i = predictions_{i, targets_i} \in -- TopKIncludingTies(predictions_i)$$ inTopK :: (OneOf '[Int32, Int64] t) => Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool inTopK' :: (OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool -- | Table initializer that takes two tensors for keys and values -- respectively.
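--
-- A minimal sketch of the intended call sequence: create a hashTable,
-- build the initializer, and run it once before any lookups. This is a
-- hedged sketch, not generated documentation: DT_STRING and DT_INT64 are
-- assumed from the generated protobuf enums, and vector, run_ and
-- runSession from the companion tensorflow-ops and tensorflow packages;
-- the key/value literals are made up for illustration.
--
-- ```
-- {-# LANGUAGE OverloadedStrings #-}
-- import Data.ByteString (ByteString)
-- import Data.Int (Int64)
-- import Proto.Tensorflow.Core.Framework.Types (DataType(..))
-- import qualified TensorFlow.GenOps.Core as C
-- import qualified TensorFlow.Ops as TF (vector)
-- import qualified TensorFlow.Session as TF (runSession, run_)
--
-- initDemo :: IO ()
-- initDemo = TF.runSession $ do
--   table  <- C.hashTable DT_STRING DT_INT64  -- immutable once initialized
--   initOp <- C.initializeTable table
--               (TF.vector ["one", "two" :: ByteString])
--               (TF.vector [1, 2 :: Int64])
--   TF.run_ initOp   -- run the initializer before any lookupTableFind
-- ```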
initializeTable :: (MonadBuild m', TensorType tkey, TensorType tval) => Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode) initializeTable' :: (MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode) -- | Initializes a table from a text file. -- -- It inserts one key-value pair into the table for each line of the -- file. The key and value are extracted from the whole line content, -- elements from the split line based on delimiter or the line -- number (starting from zero). Where to extract the key and value from a -- line is specified by key_index and value_index: a value of -1 -- means use the line number (starting from zero), a value of -2 means -- use the whole line content, and a value >= 0 means use the index -- (starting at zero) of the split line based on delimiter. -- -- initializeTableFromTextFile :: (MonadBuild m') => Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) initializeTableFromTextFile' :: (MonadBuild m') => OpParams -> Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) -- | Computes the reciprocal of x element-wise. -- -- I.e., \(y = 1 / x\). inv :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t inv' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes the gradient for the inverse of x wrt its input. -- -- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is -- the corresponding input gradient. invGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t invGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the inverse permutation of a tensor. -- -- This operation computes the inverse of an index permutation. It takes -- a 1-D integer tensor x, which represents the indices of a -- zero-based array, and swaps each value with its index position. In -- other words, for an output tensor y and an input tensor -- x, this operation computes the following: -- -- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` -- -- The values must include 0. There can be no duplicate values or -- negative values. -- -- For example: -- -- ```prettyprint # tensor x is [3, 4, 0, 2, 1] -- invert_permutation(x) ==> [2, 4, 3, 0, 1] ``` invertPermutation :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor Build t invertPermutation' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns which elements of x are finite. -- -- compatibility(numpy) Equivalent to np.isfinite -- end_compatibility isFinite :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool isFinite' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool -- | Returns which elements of x are Inf. -- -- compatibility(numpy) Equivalent to np.isinf end_compatibility isInf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool isInf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool -- | Returns which elements of x are NaN. -- -- compatibility(numpy) Equivalent to np.isnan end_compatibility isNan :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool isNan' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool -- | Checks whether a tensor has been initialized. -- -- Outputs a boolean scalar indicating whether the tensor has been -- initialized.
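--
-- A small sketch (hedged: zeroInitializedVariable, run and runSession
-- are assumed from the companion tensorflow-ops and tensorflow packages,
-- and whether True is reported depends on the initializing assign having
-- run in the session):
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.GenOps.Core as C
-- import qualified TensorFlow.Ops as TF (zeroInitializedVariable)
-- import qualified TensorFlow.Session as TF (runSession, run)
-- import TensorFlow.Tensor (Tensor, Ref)
-- import TensorFlow.Types (Shape(..))
--
-- checkInit :: IO (V.Vector Bool)
-- checkInit = TF.runSession $ do
--   v <- TF.zeroInitializedVariable (Shape [2])
--   TF.run =<< C.isVariableInitialized (v :: Tensor Ref Float)
-- ```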
isVariableInitialized :: (MonadBuild m', TensorType dtype) => Tensor Ref dtype -> m' (Tensor Value Bool) isVariableInitialized' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref dtype -> m' (Tensor Value Bool) -- | L2 Loss. -- -- Computes half the L2 norm of a tensor without the sqrt: -- -- output = sum(t ** 2) / 2 l2Loss :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t l2Loss' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Local Response Normalization. -- -- The 4-D input tensor is treated as a 3-D array of 1-D vectors -- (along the last dimension), and each vector is normalized -- independently. Within a given vector, each component is divided by the -- weighted, squared sum of inputs within depth_radius. In -- detail, -- -- sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + -- depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** -- beta -- -- For details, see Krizhevsky et al., ImageNet classification with -- deep convolutional neural networks (NIPS 2012). lRN :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t lRN' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Gradients for Local Response Normalization. lRNGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t lRNGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Generates labels for candidate sampling with a learned unigram -- distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate -- labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage -- is that the sampled candidates must be chosen independently of the -- context and of the true labels. learnedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) learnedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -- | Returns the truth value of (x < y) element-wise. -- -- less :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool less' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -- | Returns the truth value of (x <= y) element-wise. -- -- lessEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool lessEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -- | Computes the log of the absolute value of `Gamma(x)` element-wise. lgamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t lgamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Generates values in an interval. -- -- A sequence of num evenly-spaced values is generated -- beginning at start.
If `num > 1`, the values in the -- sequence increase by `(stop - start) / (num - 1)`, so that the last one -- is exactly stop. -- -- For example: -- -- ``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 -- 12.0] ``` linSpace :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t linSpace' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t -- | Computes the difference between two lists of numbers or strings. -- -- Given a list x and a list y, this operation returns -- a list out that represents all values that are in x -- but not in y. The returned list out is sorted in the -- same order that the numbers appear in x (duplicates are -- preserved). This operation also returns a list idx that -- represents the position of each out element in x. In -- other words: -- -- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` -- -- For example, given this input: -- -- ```prettyprint x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ``` -- -- This operation would return: -- -- ```prettyprint out ==> [2, 4, 6] idx ==> [1, 3, 5] ``` listDiff :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx) listDiff' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx) -- | Computes natural logarithm of x element-wise. -- -- I.e., \(y = log_e x\). log :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t log' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes natural logarithm of (1 + x) element-wise. -- -- I.e., \(y = log_e (1 + x)\). log1p :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t log1p' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes log softmax activations. -- -- For each batch i and class j we have -- -- logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) logSoftmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t logSoftmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Generates labels for candidate sampling with a log-uniform -- distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate -- labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage -- is that the sampled candidates must be chosen independently of the -- context and of the true labels. logUniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) logUniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -- | Returns the truth value of x AND y element-wise. -- -- logicalAnd :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool logicalAnd' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool -- | Returns the truth value of NOT x element-wise.
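--
-- For instance, an element-wise "outside the interval [lo, hi]" test can
-- be phrased with logicalNot over logicalAnd, or directly with
-- logicalOr; both build the same truth table. A pure graph-building
-- sketch using only ops from this module:
--
-- ```
-- import qualified TensorFlow.GenOps.Core as C
--
-- -- outside lo hi x: True wherever x < lo or x > hi.
-- outside lo hi x = C.logicalNot (C.logicalAnd (C.greaterEqual x lo)
--                                              (C.lessEqual x hi))
-- -- equivalently: outside lo hi x = C.logicalOr (C.less x lo) (C.greater x hi)
-- ```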
logicalNot :: Tensor v'1 Bool -> Tensor Build Bool logicalNot' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool -- | Returns the truth value of x OR y element-wise. -- -- logicalOr :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool logicalOr' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool -- | Outputs all keys and values in the table. lookupTableExport :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues)) lookupTableExport' :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -- | Looks up keys in a table, outputs the corresponding values. -- -- The tensor keys must be of the same type as the keys of the -- table. The output values is of the type of the table values. -- -- The scalar default_value is the value output for keys not -- present in the table. It must also be of the same type as the table -- values. lookupTableFind :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout) lookupTableFind' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout) -- | Replaces the contents of the table with the specified keys and values. -- -- The tensor keys must be of the same type as the keys of the -- table. The tensor values must be of the type of the table -- values. lookupTableImport :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) lookupTableImport' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) -- | Updates the table to associate keys with values. -- -- The tensor keys must be of the same type as the keys of the -- table. The tensor values must be of the type of the table -- values. lookupTableInsert :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) lookupTableInsert' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) -- | Computes the number of elements in the given table. lookupTableSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) lookupTableSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) -- | Forwards the input to the output. -- -- This operator represents the loop termination condition used by the -- "pivot" switches of a loop. loopCond :: Tensor v'1 Bool -> Tensor Build Bool loopCond' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool -- | Multiply the matrix "a" by the matrix "b". -- -- The inputs must be two-dimensional matrices and the inner dimension of -- "a" (after being transposed if transpose_a is true) must match the -- outer dimension of "b" (after being transposed if transpose_b is -- true). -- -- matMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t matMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Returns the set of files matching a pattern.
-- -- Note that this routine only supports wildcard characters in the -- basename portion of the pattern, not in the directory portion. matchingFiles :: Tensor v'1 ByteString -> Tensor Build ByteString matchingFiles' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString -- | Copy a tensor setting everything outside a central band in each -- innermost matrix -- -- to zero. -- -- The band part is computed as follows: Assume input -- has k dimensions `[I, J, K, ..., M, N]`, then the output is a -- tensor with the same shape where -- -- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, -- n]`. -- -- The indicator function -- -- `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) -- && (num_upper < 0 || (n-m) <= num_upper)`. -- -- For example: -- -- ```prettyprint # if input is [[ 0, 1, 2, 3] [-1, 0, 1, 2] -- [-2, -1, 0, 1] [-3, -2, -1, 0]], -- -- tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] [-1, 0, 1, 2] -- [ 0, -1, 0, 1] [ 0, 0, -1, 0]], -- -- tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] [-1, 0, 1, 0] -- [-2, -1, 0, 1] [ 0, -2, -1, 0]] ``` -- -- Useful special cases: -- -- ```prettyprint tf.matrix_band_part(input, 0, -1) ==> Upper -- triangular part. tf.matrix_band_part(input, -1, 0) ==> Lower -- triangular part. tf.matrix_band_part(input, 0, 0) ==> Diagonal. ``` matrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t matrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t -- | Computes the determinant of one or more square matrices. -- -- The input is a tensor of shape `[..., M, M]` whose inner-most 2 -- dimensions form square matrices. The output is a tensor containing the -- determinants for all input submatrices `[..., :, :]`. matrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t matrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns a batched diagonal tensor with given batched diagonal -- values. -- -- Given a diagonal, this operation returns a tensor with the -- diagonal and everything else padded with zeros. The diagonal -- is computed as follows: -- -- Assume diagonal has k dimensions `[I, J, K, ..., -- N]`, then the output is a tensor of rank `k+1` with dimensions `[I, J, -- K, ..., N, N]` where: -- -- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. -- -- For example: -- -- ```prettyprint # diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]] -- -- and diagonal.shape = (2, 4) -- -- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, -- 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, -- 8]]] -- -- which has shape (2, 4, 4) ``` matrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t matrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns the batched diagonal part of a batched tensor. -- -- This operation returns a tensor with the diagonal part of the -- batched input. The diagonal part is computed as -- follows: -- -- Assume input has k dimensions `[I, J, K, ..., M, -- N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, -- J, K, ..., min(M, N)]` where: -- -- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. -- -- The input must be at least a matrix.
-- -- For example: -- -- ```prettyprint # input is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, -- 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, -- 8]]] -- -- and input.shape = (2, 4, 4) -- -- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] -- -- which has shape (2, 4) ``` matrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t matrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes the inverse of one or more square invertible matrices or -- their -- -- adjoints (conjugate transposes). -- -- The input is a tensor of shape `[..., M, M]` whose inner-most 2 -- dimensions form square matrices. The output is a tensor of the same -- shape as the input containing the inverse for all input submatrices -- `[..., :, :]`. -- -- The op uses LU decomposition with partial pivoting to compute the -- inverses. -- -- If a matrix is not invertible there is no guarantee what the op does. -- It may detect the condition and raise an exception or it may simply -- return a garbage result. matrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t matrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns a batched matrix tensor with new batched diagonal values. -- -- Given input and diagonal, this operation returns a -- tensor with the same shape and values as input, except for -- the main diagonal of the innermost matrices. These will be overwritten -- by the values in diagonal. -- -- The output is computed as follows: -- -- Assume input has `k+1` dimensions `[I, J, K, ..., M, N]` and -- diagonal has k dimensions `[I, J, K, ..., min(M, -- N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I, -- J, K, ..., M, N]` where: -- -- `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`, -- and `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for -- `m != n`. -- -- matrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t matrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Solves systems of linear equations. -- -- Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 -- dimensions form square matrices. Rhs is a tensor of shape -- `[..., M, K]`. The output is a tensor of shape `[..., M, K]`. If -- adjoint is False then each output matrix satisfies -- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If -- adjoint is True then each output matrix satisfies -- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. matrixSolve :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t matrixSolve' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Solves one or more linear least-squares problems. -- -- matrix is a tensor of shape `[..., M, N]` whose inner-most 2 -- dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape -- `[..., M, K]`. The output is a tensor of shape `[..., N, K]` where each -- output matrix solves each of the equations matrix[..., :, :] * -- output[..., :, :] = rhs[..., :, :] in the least squares sense. -- -- We use the following notation for each pair of matrix and right-hand -- sides in the batch: -- -- matrix=\(A \in \Re^{m \times n}\), rhs=\(B \in \Re^{m -- \times k}\), output=\(X \in \Re^{n \times k}\), -- l2_regularizer=\(\lambda\). -- -- If fast is True, then the solution is computed by -- solving the normal equations using Cholesky decomposition.
-- Specifically, if \(m \ge n\) then \(X = (A^T A + \lambda I)^{-1} A^T -- B\), which solves the least-squares problem \(X = \mathrm{argmin}_{Z \in -- \Re^{n \times k}} ||A Z - B||_F^2 + \lambda ||Z||_F^2\). If \(m < n\) -- then output is computed as \(X = A^T (A A^T + \lambda I)^{-1} -- B\), which (for \(\lambda = 0\)) is the minimum-norm solution to the -- under-determined linear system, i.e. \(X = \mathrm{argmin}_{Z \in \Re^{n -- \times k}} ||Z||_F^2\), subject to \(A Z = B\). Notice that the fast -- path is only numerically stable when \(A\) is numerically full rank -- and has a condition number \(\mathrm{cond}(A) < -- \frac{1}{\sqrt{\epsilon_{mach}}}\) or \(\lambda\) is sufficiently large. -- -- If fast is False an algorithm based on the numerically -- robust complete orthogonal decomposition is used. This computes the -- minimum-norm least-squares solution, even when \(A\) is rank -- deficient. This path is typically 6-7 times slower than the fast path. -- If fast is False then l2_regularizer is -- ignored. matrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t matrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t -- | Solves systems of linear equations with upper or lower triangular -- matrices by -- -- backsubstitution. -- -- matrix is a tensor of shape `[..., M, M]` whose inner-most 2 -- dimensions form square matrices. If lower is True then -- the strictly upper triangular part of each inner-most matrix is -- assumed to be zero and not accessed. If lower is False then -- the strictly lower triangular part of each inner-most matrix is -- assumed to be zero and not accessed. rhs is a tensor of shape -- `[..., M, K]`. -- -- The output is a tensor of shape `[..., M, K]`. If adjoint is -- False then the innermost matrices in output satisfy matrix -- equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If -- adjoint is True then the innermost matrices in -- output satisfy matrix equations -- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. matrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t matrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the maximum of elements across dimensions of a tensor. -- -- Reduces input along the dimensions given in -- reduction_indices. Unless keep_dims is true, the -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. max :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t max' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | Performs max pooling on the input. maxPool :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t maxPool' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Performs 3D max pooling on the input.
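--
-- The pooling window, strides and padding are op attributes rather than
-- inputs, so with these wrappers they are supplied through the primed
-- variant's OpParams argument. A hedged sketch (it assumes opAttr from
-- TensorFlow.Build, (.~) from lens-family, Attribute instances for
-- [Int64] and ByteString, and NDHWC-ordered attr values mirroring the
-- underlying MaxPool3D op; none of these come from this module):
--
-- ```
-- {-# LANGUAGE OverloadedStrings #-}
-- import Data.ByteString (ByteString)
-- import Data.Int (Int64)
-- import Lens.Family2 ((.~))
-- import TensorFlow.Build (opAttr)
-- import qualified TensorFlow.GenOps.Core as C
--
-- -- 2x2x2 pooling window, stride 2, no padding.
-- pool3d input = C.maxPool3D'
--     ( (opAttr "ksize"   .~ [1, 2, 2, 2, 1 :: Int64])
--     . (opAttr "strides" .~ [1, 2, 2, 2, 1 :: Int64])
--     . (opAttr "padding" .~ ("VALID" :: ByteString)) )
--     input
-- ```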
maxPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t maxPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes gradients of max pooling function. maxPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t maxPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t -- | Computes gradients of the maxpooling function. maxPoolGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t maxPoolGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t -- | Computes gradients of the maxpooling function. maxPoolGradWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t maxPoolGradWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t -- | Performs max pooling on the input and outputs both max values and -- indices. -- -- The indices in argmax are flattened, so that a maximum value -- at position `[b, y, x, c]` becomes flattened index `((b * height + y) -- * width + x) * channels + c`. maxPoolWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build targmax) maxPoolWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build targmax) -- | Returns the max of x and y (i.e. x > y ? x : y) element-wise. -- -- maximum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t maximum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the mean of elements across dimensions of a tensor. -- -- Reduces input along the dimensions given in -- reduction_indices. Unless keep_dims is true, the -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. mean :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t mean' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | Forwards the value of an available tensor from inputs to -- output. -- -- Merge waits for at least one of the tensors in -- inputs to become available. It is usually combined with -- Switch to implement branching. -- -- Merge forwards the first tensor to become available to -- output, and sets value_index to its index in -- inputs. merge :: (TensorType t) => [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32) merge' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32) -- | Merges summaries.
-- -- This op creates a `Summary` protocol buffer that contains the -- union of all the values in the input summaries. -- -- When the Op is run, it reports an InvalidArgument error if -- multiple values in the summaries to merge use the same tag. mergeSummary :: [Tensor v'1 ByteString] -> Tensor Build ByteString mergeSummary' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString -- | V2 format specific: merges the metadata files of sharded checkpoints. -- The -- -- result is one logical checkpoint, with one physical metadata file and -- renamed data files. -- -- Intended for "grouping" multiple checkpoints in a sharded checkpoint -- setup. -- -- If delete_old_dirs is true, attempts to delete recursively the dirname -- of each path in the input checkpoint_prefixes. This is useful when -- those paths are non user-facing temporary locations. mergeV2Checkpoints :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) mergeV2Checkpoints' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) -- | Computes the minimum of elements across dimensions of a tensor. -- -- Reduces input along the dimensions given in -- reduction_indices. Unless keep_dims is true, the -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. min :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t min' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | Returns the min of x and y (i.e. x < y ? x : y) element-wise. -- -- minimum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t minimum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Pads a tensor with mirrored values. -- -- This operation pads an input with mirrored values according to -- the paddings you specify. paddings is an integer -- tensor with shape `[n, 2]`, where n is the rank of input. For -- each dimension D of input, `paddings[D, 0]` indicates how -- many values to add before the contents of input in that -- dimension, and `paddings[D, 1]` indicates how many values to add after -- the contents of input in that dimension. Both `paddings[D, -- 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` -- (or `input.dim_size(D) - 1`) if copy_border is true (if -- false, respectively). -- -- The padded size of each dimension D of the output is: -- -- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` -- -- For example: -- -- ```prettyprint # t is [[1, 2, 3], [4, 5, 6]]. # -- paddings is [[1, 1], [2, 2]]. # mode is SYMMETRIC. -- # rank of t is 2. pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, -- 2] [2, 1, 1, 2, 3, 3, 2] [5, 4, 4, 5, 6, 6, 5] [5, 4, 4, 5, 6, 6, 5]] -- ``` mirrorPad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t mirrorPad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t -- | Gradient op for MirrorPad op. This op folds a mirror-padded -- tensor.
-- -- This operation folds the padded areas of input by -- MirrorPad according to the paddings you specify. -- paddings must be the same as paddings argument given -- to the corresponding MirrorPad op. -- -- The folded size of each dimension D of the output is: -- -- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` -- -- For example: -- -- ```prettyprint # t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. # -- paddings is [[0, 1], [0, 1]]. # mode is SYMMETRIC. -- # rank of t is 2. pad(t, paddings) ==> [[ 1, 5] [11, 28]] -- ``` mirrorPadGrad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t mirrorPadGrad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t -- | Returns element-wise remainder of division. -- -- mod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t mod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Returns x * y element-wise. -- -- mul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t mul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Draws samples from a multinomial distribution. multinomial :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64) multinomial' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64) -- | Creates an empty hash table that uses tensors as the backing store. It -- uses -- -- "open addressing" with quadratic reprobing to resolve collisions. -- -- This op creates a mutable hash table, specifying the type of its keys -- and values. Each value must be a scalar. Data can be inserted into the -- table using the insert operations. It does not support the -- initialization operation. mutableDenseHashTable :: (MonadBuild m', TensorType key_dtype) => DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString) mutableDenseHashTable' :: (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString) -- | Creates an empty hash table. -- -- This op creates a mutable hash table, specifying the type of its keys -- and values. Each value must be a scalar. Data can be inserted into the -- table using the insert operations. It does not support the -- initialization operation. mutableHashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) mutableHashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) -- | Creates an empty hash table. -- -- This op creates a mutable hash table, specifying the type of its keys -- and values. Each value must be a vector. Data can be inserted into the -- table using the insert operations. It does not support the -- initialization operation. mutableHashTableOfTensors :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) mutableHashTableOfTensors' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) -- | Computes numerical negative value element-wise. -- -- I.e., \(y = -x\).
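--
-- For example (a sketch; vector is assumed from the companion
-- tensorflow-ops package, not this module):
--
-- ```
-- import qualified TensorFlow.GenOps.Core as C
-- import qualified TensorFlow.Ops as TF (vector)
--
-- negated = C.neg (TF.vector [1, -2, 3 :: Float])  -- ==> [-1.0, 2.0, -3.0]
-- ```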
neg :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t neg' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Training via negative sampling. negTrain :: (MonadBuild m') => Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode) negTrain' :: (MonadBuild m') => OpParams -> Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode) -- | Makes its input available to the next iteration. nextIteration :: (TensorType t) => Tensor v'1 t -> Tensor Build t nextIteration' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Does nothing. Only useful as a placeholder for control edges. noOp :: (MonadBuild m') => m' (ControlNode) noOp' :: (MonadBuild m') => OpParams -> m' (ControlNode) -- | Greedily selects a subset of bounding boxes in descending order of -- score, -- -- pruning away boxes that have high intersection-over-union (IOU) -- overlap with previously selected boxes. Bounding boxes are supplied as -- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of -- any diagonal pair of box corners and the coordinates can be provided -- as normalized (i.e., lying in the interval [0, 1]) or absolute. Note -- that this algorithm is agnostic to where the origin is in the -- coordinate system. Note that this algorithm is invariant to orthogonal -- transformations and translations of the coordinate system; thus -- translating or reflecting the coordinate system results in the same -- boxes being selected by the algorithm. -- -- The output of this operation is a set of integers indexing into the -- input collection of bounding boxes representing the selected boxes. -- The bounding box coordinates corresponding to the selected indices can -- then be obtained using the `tf.gather` operation. For example: -- -- selected_indices = tf.image.non_max_suppression( boxes, scores, -- max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, -- selected_indices) nonMaxSuppression :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32 nonMaxSuppression' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32 -- | Returns the truth value of (x != y) element-wise. -- -- notEqual :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool notEqual' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool -- | Returns a one-hot tensor. -- -- The locations represented by indices in indices take value -- on_value, while all other locations take value -- off_value. -- -- If the input indices has rank N, the output will have -- rank `N+1`. The new axis is created at dimension axis -- (default: the new axis is appended at the end). -- -- If indices is a scalar the output shape will be a vector of -- length depth.
-- -- If indices is a vector of length features, the -- output shape will be: ``` features x depth if axis == -1 depth x -- features if axis == 0 ``` -- -- If indices is a matrix (batch) with shape `[batch, -- features]`, the output shape will be: ``` batch x features x depth if -- axis == -1 batch x depth x features if axis == 1 depth x batch x -- features if axis == 0 ``` -- -- Examples ========= -- -- Suppose that -- -- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 -- axis = -1 ``` -- -- Then output is `[4 x 3]`: -- -- ```output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) -- [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ``` -- -- Suppose that -- -- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 -- axis = 0 ``` -- -- Then output is `[3 x 4]`: -- -- ```output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 -- 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ -- one_hot(1) ``` -- -- Suppose that -- -- ``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = -- 0.0 axis = -1 ``` -- -- Then output is `[2 x 2 x 3]`: -- -- ```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // -- one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // -- one_hot(-1) ]``` oneHot :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t oneHot' :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -- | Packs a list of N rank-R tensors into one -- rank-`(R+1)` tensor. -- -- Packs the N tensors in values into a tensor with -- rank one higher than each tensor in values, by packing them -- along the axis dimension. Given a list of tensors of shape -- `(A, B, C)`; -- -- if `axis == 0` then the output tensor will have the shape -- `(N, A, B, C)`. if `axis == 1` then the output tensor will -- have the shape `(A, N, B, C)`. Etc. -- -- For example: -- -- ```prettyprint # x is [1, 4] # y is [2, 5] # -- z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # -- Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, -- 6]] ``` -- -- This is the opposite of unpack. pack :: (TensorType t) => [Tensor v'1 t] -> Tensor Build t pack' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> Tensor Build t -- | Pads a tensor with zeros. -- -- This operation pads an input with zeros according to the -- paddings you specify. paddings is an integer tensor -- with shape `[Dn, 2]`, where n is the rank of input. For each -- dimension D of input, `paddings[D, 0]` indicates how many -- zeros to add before the contents of input in that dimension, -- and `paddings[D, 1]` indicates how many zeros to add after the -- contents of input in that dimension. -- -- The padded size of each dimension D of the output is: -- -- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` -- -- For example: -- -- ```prettyprint # t is [[1, 1], [2, 2]] # paddings is -- [[1, 1], [2, 2]] # rank of t is 2 pad(t, paddings) ==> -- [[0, 0, 0, 0, 0, 0] [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, -- 0, 0]] ``` pad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t pad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t -- | A queue that produces elements in first-in first-out order.
-- -- Variable-size shapes are allowed by setting the corresponding shape -- dimensions to 0 in the shape attr. In this case DequeueMany will pad -- up to the maximum size of any given element in the minibatch. See -- below for details. paddingFIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) paddingFIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) -- | A queue that produces elements in first-in first-out order. -- -- Variable-size shapes are allowed by setting the corresponding shape -- dimensions to 0 in the shape attr. In this case DequeueMany will pad -- up to the maximum size of any given element in the minibatch. See -- below for details. paddingFIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) paddingFIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) -- | Concatenates a list of N tensors along the first dimension. -- -- The input tensors are all required to have size 1 in the first -- dimension. -- -- For example: -- -- ```prettyprint # x is [[1, 4]] # y is [[2, 5]] # -- z is [[3, 6]] parallel_concat([x, y, z]) => [[1, 4], [2, -- 5], [3, 6]] # Pack along first dim. ``` -- -- The difference between concat and parallel_concat is that concat -- requires all of the inputs to be computed before the operation will -- begin, but doesn't require that the input shapes be known during graph -- construction. Parallel concat will copy pieces of the input into the -- output as they become available; in some situations this can provide a -- performance benefit. parallelConcat :: (TensorType t) => Shape -> [Tensor v'1 t] -> Tensor Build t parallelConcat' :: (TensorType t) => OpParams -> Shape -> [Tensor v'1 t] -> Tensor Build t -- | Outputs random values from a normal distribution. The parameters may -- each be a -- -- scalar which applies to the entire output, or a vector of length -- shape[0] which stores the parameters for each batch. parameterizedTruncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype) parameterizedTruncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype) -- | Transforms a vector of brain.Example protos (as strings) into typed -- tensors. parseExample :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense) parseExample' :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense) -- | Transforms a scalar brain.SequenceExample proto (as strings) into -- typed tensors.
parseSingleSequenceExample :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types) parseSingleSequenceExample' :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types) -- | Transforms a serialized tensorflow.TensorProto proto into a Tensor. parseTensor :: (TensorType out_type) => Tensor v'1 ByteString -> Tensor Build out_type parseTensor' :: (TensorType out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type -- | A placeholder op for a value that will be fed into the computation. -- -- N.B. This operation will fail with an error if it is executed. It is -- intended as a way to represent a value that will always be fed, and to -- provide attrs that enable the fed value to be checked at runtime. placeholder :: (TensorType dtype) => Tensor Build dtype placeholder' :: (TensorType dtype) => OpParams -> Tensor Build dtype -- | A placeholder op for a value that will be fed into the computation. -- -- N.B. This operation will fail with an error if it is executed. It is -- intended as a way to represent a value that will always be fed, and to -- provide attrs that enable the fed value to be checked at runtime. placeholderV2 :: (TensorType dtype) => Shape -> Tensor Build dtype placeholderV2' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype -- | A placeholder op that passes through input when its output is -- not fed. placeholderWithDefault :: (TensorType dtype) => Shape -> Tensor v'1 dtype -> Tensor Build dtype placeholderWithDefault' :: (TensorType dtype) => OpParams -> Shape -> Tensor v'1 dtype -> Tensor Build dtype -- | Compute the polygamma function \(\psi^{(n)}(x)\). -- -- The polygamma function is defined as: -- -- ``` \psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x) ``` where \(\psi(x)\) is the -- digamma function. polygamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t polygamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the power of one value to another. -- -- Given a tensor x and a tensor y, this operation -- computes \(x^y\) for corresponding elements in x and -- y.
For example: -- -- ``` # tensor x is [[2, 2], [3, 3]] # tensor y is -- [[8, 16], [2, 3]] tf.pow(x, y) ==> [[256, 65536], [9, 27]] ``` pow :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t pow' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | An identity op that triggers an error if a gradient is requested. -- -- When executed in a graph, this op outputs its input tensor as-is. -- -- When building ops to compute gradients, the TensorFlow gradient system -- will return an error when trying to look up the gradient of this op, -- because no gradient must ever be registered for this function. This op -- exists to prevent subtle bugs from silently returning unimplemented -- gradients in some corner cases. preventGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t preventGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Prints a list of tensors. -- -- Passes input through to output and prints `data` -- when evaluating. print :: (MonadBuild m', TensorType t, TensorTypes u) => Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t) print' :: (MonadBuild m', TensorType t, TensorTypes u) => OpParams -> Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t) -- | A queue that produces elements sorted by the first component value. -- -- Note that the PriorityQueue requires the first component of any -- element to be a scalar int64, in addition to the other elements -- declared by component_types. Therefore calls to Enqueue and -- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will -- all require (resp. output) one extra entry in their input (resp. -- output) lists. priorityQueue :: (MonadBuild m') => m' (Tensor Ref ByteString) priorityQueue' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) -- | A queue that produces elements sorted by the first component value. -- -- Note that the PriorityQueue requires the first component of any -- element to be a scalar int64, in addition to the other elements -- declared by component_types. Therefore calls to Enqueue and -- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will -- all require (resp. output) one extra entry in their input (resp. -- output) lists. priorityQueueV2 :: (MonadBuild m') => m' (ResourceHandle) priorityQueueV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) -- | Computes the product of elements across dimensions of a tensor. -- -- Reduces input along the dimensions given in -- reduction_indices. Unless keep_dims is true, the -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. prod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t prod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | Computes the QR decompositions of one or more matrices. -- -- Computes the QR decomposition of each inner matrix in tensor -- such that `tensor[..., :, :] = q[..., :, :] * r[..., :, :]` -- -- ```prettyprint # a is a tensor. # q is a tensor of orthonormal -- matrices. # r is a tensor of upper triangular matrices.
q, r = qr(a) -- q_full, r_full = qr(a, full_matrices=True) ``` qr :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) qr' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) -- | Quantizes then dequantizes a tensor. -- -- This op simulates the precision loss from the quantized forward pass -- by: 1. Quantizing the tensor to fixed point numbers, which should -- match the target quantization method when it is used in inference. 2. -- Dequantizing it back to floating point numbers for the following ops, -- most likely matmul. -- -- There are different ways to quantize. This version does not use the -- full range of the output type, choosing to elide the lowest possible -- value for symmetry (e.g., output range is -127 to 127, not -128 to 127 -- for signed 8 bit quantization), so that 0.0 maps to 0. -- -- To perform this op, we first find the range of values in our tensor. -- The range we use is always centered on 0, so we find m such that -- --
-- 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
-- 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
-- -- Our input tensor range is then [-m, m]. -- -- Next, we choose our fixed-point quantization buckets, [min_fixed, -- max_fixed]. If signed_input is true, this is -- -- [min_fixed, max_fixed] = [-((1 << (num_bits - 1)) - 1), (1 << -- (num_bits - 1)) - 1]. -- -- Otherwise, if signed_input is false, the fixed-point range is -- -- [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]. -- -- From this we compute our scaling factor, s: -- -- s = (max_fixed - min_fixed) / (2 * m). -- -- Now we can quantize and dequantize the elements of our tensor. An -- element e is transformed into e': -- -- e' = (e * s).round_to_nearest() / s. -- -- Note that we have a different number of buckets in the signed vs. -- unsigned cases. For example, if num_bits == 8, we get 254 buckets in -- the signed case vs. 255 in the unsigned case. -- -- For example, suppose num_bits = 8 and m = 1. Then -- -- [min_fixed, max_fixed] = [-127, 127], and s = (127 + 127) / 2 = 127. -- -- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, -- 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}. quantizeAndDequantize :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t quantizeAndDequantize' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Convert the quantized input tensor into a lower-precision -- output, using the -- -- actual distribution of the values to maximize the usage of the lower -- bit depth and adjusting the output min and max ranges accordingly. -- -- [input_min, input_max] are scalar floats that specify the range for -- the float interpretation of the input data. -- -- This operator tries to squeeze as much precision as possible into an -- output with a lower bit depth by calculating the actual min and max -- values found in the data. For example, maybe that quint16 input has no -- values lower than 16,384 and none higher than 49,152. That means only -- half the range is actually needed, all the float interpretations are -- between -0.5f and 0.5f, so if we want to compress the data into a -- quint8 output, we can use that range rather than the theoretical -1.0f -- to 1.0f that is suggested by the input min and max. -- -- In practice, this is most useful for taking output from operations -- like QuantizedMatMul that can produce higher bit-depth outputs than -- their inputs and may have large potential output ranges, but in -- practice have a distribution of input values that only uses a small -- fraction of the possible range. By feeding that output into this -- operator, we can reduce it from 32 bits down to 8 with minimal loss of -- accuracy. quantizeDownAndShrinkRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) quantizeDownAndShrinkRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- | Quantize the input tensor of type float to output -- tensor of type T. -- -- [min_range, max_range] are scalar floats that specify the range for -- the input data; the mode attribute controls which calculations are -- used to convert the float values to their quantized equivalents. -- -- In MIN_COMBINED mode, each value of the tensor will undergo -- the following: -- -- ``` out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) -- if T == qint8, out[i] -= (range(T) + 1) / 2.0 ``` here `range(T) = -- numeric_limits<T>::max() - numeric_limits<T>::min()` -- -- MIN_COMBINED mode example: -- -- Assume the input is type float and has a possible range of [0.0, 6.0] -- and the output type is quint8 ([0, 255]). The min_range and max_range -- values should be specified as 0.0 and 6.0. Quantizing from float to -- quint8 will multiply each value of the input by 255/6 and cast to -- quint8.
-- | Quantize the input tensor of type float to output -- tensor of type T. -- -- [min_range, max_range] are scalar floats that specify the range for the input data. The mode attribute controls exactly which calculations are used to convert the float values to their quantized equivalents. -- -- In MIN_COMBINED mode, each value of the tensor will undergo -- the following: -- -- ``` out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) -- if T == qint8, out[i] -= (range(T) + 1) / 2.0 ``` here `range(T) = -- numeric_limitsT::max() - numeric_limitsT::min()` -- -- MIN_COMBINED mode example: -- -- Assume the input is type float and has a possible range of [0.0, 6.0] -- and the output type is quint8 ([0, 255]). The min_range and max_range -- values should be specified as 0.0 and 6.0. Quantizing from float to -- quint8 will multiply each value of the input by 255/6 and cast to -- quint8. -- -- If the output type was qint8 ([-128, 127]), the operation will -- additionally subtract 128 from each value prior to casting, so that the -- range of values aligns with the range of qint8. -- -- If the mode is MIN_FIRST, then this approach is used: -- -- ``` number_of_steps = 1 << (# of bits in T) range_adjust = -- number_of_steps / (number_of_steps - 1) range = (range_max - -- range_min) * range_adjust range_scale = number_of_steps / range -- quantized = round(input * range_scale) - round(range_min * -- range_scale) + numeric_limitsT::min() quantized = -- max(quantized, numeric_limitsT::min()) quantized = -- min(quantized, numeric_limitsT::max()) ``` -- -- The biggest difference between this and MIN_COMBINED is that the -- minimum range is rounded first, before it's subtracted from the -- rounded value. With MIN_COMBINED, a small bias is introduced where -- repeated iterations of quantizing and dequantizing will introduce a -- larger and larger error. -- -- One thing to watch out for is that the operator may choose to adjust -- the requested minimum and maximum values slightly during the -- quantization process, so you should always use the output ports as the -- range for further calculations. For example, if the requested minimum -- and maximum values are close to equal, they will be separated by a -- small epsilon value to prevent ill-formed quantized buffers from being -- created. Otherwise, you can end up with buffers where all the -- quantized values map to the same float value, which causes problems -- for operations that have to perform further calculations on them.
quantizeV2 :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizeV2' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Produces the average pool of the input tensor for quantized types.
quantizedAvgPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedAvgPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Quantized Batch normalization. -- -- This op is deprecated and will be removed in the future. Prefer -- `tf.nn.batch_normalization`.
quantizedBatchNormWithGlobalNormalization :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) quantizedBatchNormWithGlobalNormalization' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- | Adds Tensor bias to Tensor input for Quantized -- types. -- -- Broadcasts the values of bias on dimensions 0..N-2 of input. quantizedBiasAdd :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) quantizedBiasAdd' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- | Concatenates quantized tensors along one dimension. quantizedConcat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float) quantizedConcat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- | Computes a 2D convolution given quantized 4D input and filter tensors. -- -- The inputs are quantized tensors where the lowest value represents the -- real number of the associated minimum, and the highest represents the -- maximum. This means that you can only interpret the quantized output -- in the same way, by taking the returned minimum and maximum values -- into account. quantizedConv2D :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) quantizedConv2D' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- | Quantized Instance normalization. 
quantizedInstanceNorm :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedInstanceNorm' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Perform a quantized matrix multiplication of a by the matrix -- b. -- -- The inputs must be two-dimensional matrices and the inner dimension of -- a (after being transposed if transpose_a is -- non-zero) must match the outer dimension of b (after being -- transposed if transpose_b is non-zero).
quantizedMatMul :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)
quantizedMatMul' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)
-- | Produces the max pool of the input tensor for quantized types.
quantizedMaxPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedMaxPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Computes Quantized Rectified Linear: `max(features, 0)`
quantizedRelu :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedRelu' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
quantizedRelu6 :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedRelu6' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Computes Quantized Rectified Linear X: `min(max(features, 0), -- max_value)`
quantizedReluX :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
quantizedReluX' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
-- | Reshapes a quantized tensor as per the
Reshape op.
quantizedReshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
quantizedReshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
-- | Closes the given queue. -- -- This operation signals that no more elements will be enqueued in the -- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent -- Dequeue(Many) operations will continue to succeed if sufficient -- elements remain in the queue. Subsequent Dequeue(Many) operations that -- would block will fail immediately.
queueClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode)
queueClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode)
-- | Closes the given queue. -- -- This operation signals that no more elements will be enqueued in the -- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent -- Dequeue(Many) operations will continue to succeed if sufficient -- elements remain in the queue. Subsequent Dequeue(Many) operations that -- would block will fail immediately.
queueCloseV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode)
queueCloseV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode)
-- | Dequeues a tuple of one or more tensors from the given queue. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. If the queue is empty, this operation will block until an element -- has been dequeued (or timeout_ms elapses, if specified).
queueDequeue :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> m' (TensorList (Value) component_types)
queueDequeue' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> m' (TensorList (Value) component_types)
-- | Dequeues n tuples of one or more tensors from the given queue. -- -- If the queue is closed and there are fewer than n elements, then an -- OutOfRange error is returned. -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. If the queue is empty, this operation will block until n elements -- have been dequeued (or timeout_ms elapses, if specified).
queueDequeueMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
queueDequeueMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types)
-- | Dequeues n tuples of one or more tensors from the given queue. -- -- If the queue is closed and there are fewer than n elements, then an -- OutOfRange error is returned. -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension.
-- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. If the queue is empty, this operation will block until n elements -- have been dequeued (or timeout_ms elapses, if specified). queueDequeueManyV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) queueDequeueManyV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) -- | Dequeues n tuples of one or more tensors from the given queue. -- -- This operation is not supported by all queues. If a queue does not -- support DequeueUpTo, then an Unimplemented error is returned. -- -- If the queue is closed and there are more than 0 but less than n -- elements remaining, then instead of returning an OutOfRange error like -- QueueDequeueMany, less than n elements are returned -- immediately. If the queue is closed and there are 0 elements left in -- the queue, then an OutOfRange error is returned just like in -- QueueDequeueMany. Otherwise the behavior is identical to -- QueueDequeueMany: -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. queueDequeueUpTo :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) queueDequeueUpTo' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) -- | Dequeues n tuples of one or more tensors from the given queue. -- -- This operation is not supported by all queues. If a queue does not -- support DequeueUpTo, then an Unimplemented error is returned. -- -- If the queue is closed and there are more than 0 but less than n -- elements remaining, then instead of returning an OutOfRange error like -- QueueDequeueMany, less than n elements are returned -- immediately. If the queue is closed and there are 0 elements left in -- the queue, then an OutOfRange error is returned just like in -- QueueDequeueMany. Otherwise the behavior is identical to -- QueueDequeueMany: -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. queueDequeueUpToV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) queueDequeueUpToV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) -- | Dequeues a tuple of one or more tensors from the given queue. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. 
If the queue is empty, this operation will block until an element -- has been dequeued (or timeout_ms elapses, if specified).
queueDequeueV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> m' (TensorList (Value) component_types)
queueDequeueV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> m' (TensorList (Value) component_types)
-- | Enqueues a tuple of one or more tensors in the given queue. -- -- The components input has k elements, which correspond to the -- components of tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- element has been enqueued (or timeout_ms elapses, if -- specified).
queueEnqueue :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueue' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Enqueues zero or more tuples of one or more tensors in the given -- queue. -- -- This operation slices each component tensor along the 0th dimension to -- make multiple queue elements. All of the tuple components must have -- the same size in the 0th dimension. -- -- The components input has k elements, which correspond to the -- components of tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- elements have been enqueued (or timeout_ms elapses, if -- specified).
queueEnqueueMany :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueueMany' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Enqueues zero or more tuples of one or more tensors in the given -- queue. -- -- This operation slices each component tensor along the 0th dimension to -- make multiple queue elements. All of the tuple components must have -- the same size in the 0th dimension. -- -- The components input has k elements, which correspond to the -- components of tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- elements have been enqueued (or timeout_ms elapses, if -- specified).
queueEnqueueManyV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueueManyV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Enqueues a tuple of one or more tensors in the given queue. -- -- The components input has k elements, which correspond to the -- components of tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- element has been enqueued (or timeout_ms elapses, if -- specified).
queueEnqueueV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
queueEnqueueV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode)
-- | Computes the number of elements in the given queue.
queueSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32)
queueSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
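-- A minimal end-to-end sketch of the enqueue/size ops above. This is an
-- assumption-laden illustration, not generated documentation: it assumes
-- the heterogeneous-list constructors Nil and (:/) from TensorFlow.Types,
-- the DT_FLOAT enum from the generated protobuf module, and it uses
-- randomShuffleQueue, which is documented further below. Enqueueing a
-- 3-element vector with queueEnqueueMany slices it into three scalar
-- queue elements, so queueSize reports 3.
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int32)
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (vector)
-- > import TensorFlow.GenOps.Core (queueEnqueueMany, queueSize, randomShuffleQueue)
-- > import TensorFlow.Types (ListOf (..))
-- > import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >     q   <- randomShuffleQueue [DT_FLOAT]
-- >     enq <- queueEnqueueMany q (TF.vector [1, 2, 3 :: Float] :/ Nil)
-- >     TF.run_ enq                       -- actually runs the enqueue
-- >     TF.Scalar n <- TF.run =<< queueSize q
-- >     liftIO (print (n :: Int32))       -- prints 3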
-- | Computes the number of elements in the given queue.
queueSizeV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int32)
queueSizeV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int32)
-- | Converts one or more images from RGB to HSV. -- -- Outputs a tensor of the same shape as the images tensor, -- containing the HSV value of the pixels. The output is only well -- defined if the values in images are in `[0,1]`. -- -- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, -- and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A -- hue of 0 corresponds to pure red, hue 1/3 is pure green, and 2/3 -- is pure blue.
rGBToHSV :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
rGBToHSV' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Randomly crop image. -- -- size is a 1-D int64 tensor with 2 elements representing the -- crop height and width. The values must be non-negative. -- -- This Op picks a random location in image and crops a -- height by width rectangle from that location. The -- random location is picked so the cropped area will fit inside the -- original image.
randomCrop :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t)
randomCrop' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t)
-- | Outputs random values from the Gamma distribution(s) described by -- alpha. -- -- This op uses the algorithm by Marsaglia et al. to acquire samples via -- transformation-rejection from pairs of uniform and normal random -- variables. See http://dl.acm.org/citation.cfm?id=358414
randomGamma :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t)
randomGamma' :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t)
-- | Randomly shuffles a tensor along its first dimension. -- -- The tensor is shuffled along dimension 0, such that each `value[j]` is -- mapped to one and only one `output[i]`. For example, a mapping that -- might occur for a 3x2 tensor is: -- -- ```prettyprint
-- [[1, 2],       [[5, 6],
--  [3, 4],  ==>   [1, 2],
--  [5, 6]]        [3, 4]]
-- ```
randomShuffle :: (MonadBuild m', TensorType t) => Tensor v'1 t -> m' (Tensor Value t)
randomShuffle' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -> m' (Tensor Value t)
-- | A queue that randomizes the order of elements.
randomShuffleQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString)
randomShuffleQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
-- | A queue that randomizes the order of elements.
randomShuffleQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle)
randomShuffleQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle)
-- | Outputs random values from a normal distribution. -- -- The generated values will have mean 0 and standard deviation 1.
randomStandardNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype)
randomStandardNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
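-- A small sketch of drawing samples from the normal-distribution op above
-- (illustrative only; the import layout is an assumption based on the
-- tensorflow package's conventions). randomStandardNormal takes the output
-- shape as a tensor and yields a Tensor Value of fresh samples.
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (vector)
-- > import TensorFlow.GenOps.Core (randomStandardNormal)
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >     noise <- randomStandardNormal (TF.vector [2, 3 :: Int32])
-- >     xs <- TF.run noise :: TF.Session (V.Vector Float)
-- >     liftIO (print xs)   -- six samples with mean 0 and stddev 1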
-- | Outputs random values from a uniform distribution. -- -- The generated values follow a uniform distribution in the range `[0, -- 1)`. The lower bound 0 is included in the range, while the upper bound -- 1 is excluded.
randomUniform :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype)
randomUniform' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
-- | Outputs random integers from a uniform distribution. -- -- The generated values are uniform integers in the range `[minval, -- maxval)`. The lower bound minval is included in the range, -- while the upper bound maxval is excluded. -- -- The random integers are slightly biased unless `maxval - minval` is an -- exact power of two. The bias is small for values of `maxval - minval` -- significantly smaller than the range of the output (either `2^32` or -- `2^64`).
randomUniformInt :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout)
randomUniformInt' :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout)
-- | Creates a sequence of numbers. -- -- This operation creates a sequence of numbers that begins at -- start and extends by increments of delta up to but -- not including limit. -- -- For example: -- -- ``` # start is 3 # limit is 18 # delta is 3 -- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ```
range :: (OneOf '[Int32, Int64, Double, Float] tidx) => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
range' :: (OneOf '[Int32, Int64, Double, Float] tidx) => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
-- | Returns the rank of a tensor. -- -- This operation returns an integer representing the rank of -- input. -- -- For example: -- -- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], -- [4, 4, 4]]] # shape of tensor t is [2, 2, 3] rank(t) ==> 3 -- ``` -- -- Note: the rank of a tensor is not the same as the rank of a matrix. It is the number of indices required to uniquely select each element of the tensor, and is also known as "order", "degree", or "ndims".
rank :: (TensorType t) => Tensor v'1 t -> Tensor Build Int32
rank' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int32
-- | Reads and outputs the entire contents of the input filename.
readFile :: Tensor v'1 ByteString -> Tensor Build ByteString
readFile' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
-- | Reads the value of a variable. -- -- The tensor returned by this operation is immutable. -- -- The value returned by this operation is guaranteed to be influenced by -- all the writes on which this operation depends directly or indirectly, -- and to not be influenced by any of the writes which depend directly or -- indirectly on this operation.
readVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> m' (Tensor Value dtype)
readVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> m' (Tensor Value dtype)
-- | Returns the number of records this Reader has produced. -- -- This is the same as the number of ReaderRead executions that have -- succeeded.
readerNumRecordsProduced :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64)
readerNumRecordsProduced' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64)
-- | Returns the number of records this Reader has produced. -- -- This is the same as the number of ReaderRead executions that have -- succeeded.
readerNumRecordsProducedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64) readerNumRecordsProducedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64) -- | Returns the number of work units this Reader has finished processing. readerNumWorkUnitsCompleted :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) readerNumWorkUnitsCompleted' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) -- | Returns the number of work units this Reader has finished processing. readerNumWorkUnitsCompletedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64) readerNumWorkUnitsCompletedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64) -- | Returns the next record (key, value pair) produced by a Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the Reader -- needs to start reading from a new file since it has finished with the -- previous file). readerRead :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString)) readerRead' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -- | Returns up to num_records (key, value) pairs produced by a -- Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the Reader -- needs to start reading from a new file since it has finished with the -- previous file). It may return less than num_records even -- before the last batch. readerReadUpTo :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) readerReadUpTo' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -- | Returns up to num_records (key, value) pairs produced by a -- Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the Reader -- needs to start reading from a new file since it has finished with the -- previous file). It may return less than num_records even -- before the last batch. readerReadUpToV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) readerReadUpToV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -- | Returns the next record (key, value pair) produced by a Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the Reader -- needs to start reading from a new file since it has finished with the -- previous file). readerReadV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString)) readerReadV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString)) -- | Restore a Reader to its initial clean state. readerReset :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) readerReset' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) -- | Restore a Reader to its initial clean state. readerResetV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) readerResetV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) -- | Restore a reader to a previously saved state. 
-- -- Not all Readers support being restored, so this can produce an -- Unimplemented error.
readerRestoreState :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
readerRestoreState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode)
-- | Restore a reader to a previously saved state. -- -- Not all Readers support being restored, so this can produce an -- Unimplemented error.
readerRestoreStateV2 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode)
readerRestoreStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode)
-- | Produce a string tensor that encodes the state of a Reader. -- -- Not all Readers support being serialized, so this can produce an -- Unimplemented error.
readerSerializeState :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value ByteString)
readerSerializeState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value ByteString)
-- | Produce a string tensor that encodes the state of a Reader. -- -- Not all Readers support being serialized, so this can produce an -- Unimplemented error.
readerSerializeStateV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value ByteString)
readerSerializeStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value ByteString)
-- | Returns the real part of a complex number. -- -- Given a tensor input of complex numbers, this operation -- returns a tensor of type float that is the real part of each -- element in input. All elements in input must be -- complex numbers of the form \(a + bj\), where *a* is the real part -- returned by this operation and *b* is the imaginary part. -- -- For example: -- -- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] -- tf.real(input) ==> [-2.25, 3.25] ```
real :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout
real' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout
-- | Returns x / y element-wise for real types. -- -- If x and y are reals, this will return the -- floating-point division.
realDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
realDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes the reciprocal of x element-wise. -- -- I.e., \(y = 1 / x\).
reciprocal :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
reciprocal' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient for the inverse of x wrt its input. -- -- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is -- the corresponding input gradient.
reciprocalGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
reciprocalGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
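-- A tiny sketch of running one of the pure element-wise ops above
-- (illustrative only; it assumes TF.render from TensorFlow.Core to turn
-- the Build tensor into a fetchable rendered tensor).
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF (vector)
-- > import TensorFlow.GenOps.Core (reciprocal)
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >     ys <- TF.run =<< TF.render (reciprocal (TF.vector [1, 2, 4 :: Float]))
-- >     liftIO (print (ys :: V.Vector Float))   -- [1.0, 0.5, 0.25]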
-- | Emits randomized records.
recordInput :: (MonadBuild m') => m' (Tensor Value ByteString)
recordInput' :: (MonadBuild m') => OpParams -> m' (Tensor Value ByteString)
-- | Joins a string Tensor across the given dimensions. -- -- Computes the string join across dimensions in the given string Tensor -- of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by -- joining the input strings with the given separator (default: empty -- string). Negative indices are counted backwards from the end, with -- `-1` being equivalent to `n - 1`. -- -- For example: -- -- ``` # tensor a is [["a", "b"], ["c", "d"]] tf.reduce_join(a, -- 0) ==> ["ac", "bd"] tf.reduce_join(a, 1) ==> ["ab", "cd"] -- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] -- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] -- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] -- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] -- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] -- tf.reduce_join(a, [0, 1]) ==> ["acbd"] tf.reduce_join(a, [1, 0]) -- ==> ["abcd"] tf.reduce_join(a, []) ==> ["abcd"] ```
reduceJoin :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
reduceJoin' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
-- | Creates or finds a child frame, and makes `data` available to the -- child frame. -- -- The unique frame_name is used by the Executor to -- identify frames. If is_constant is true, output is a -- constant in the child frame; otherwise it may be changed in the child -- frame. At most parallel_iterations iterations are run in -- parallel in the child frame.
refEnter :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refEnter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Exits the current frame to its parent frame. -- -- Exit makes its input `data` available to the parent frame.
refExit :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refExit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Return the same ref tensor as the input ref tensor.
refIdentity :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refIdentity' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Forwards the value of an available tensor from inputs to -- output. -- -- Merge waits for at least one of the tensors in -- inputs to become available. It is usually combined with -- Switch to implement branching. -- -- Merge forwards the first tensor to become available to -- output, and sets value_index to its index in -- inputs.
refMerge :: (MonadBuild m', TensorType t) => [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32))
refMerge' :: (MonadBuild m', TensorType t) => OpParams -> [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32))
-- | Makes its input available to the next iteration.
refNextIteration :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
refNextIteration' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
-- | Forwards the indexth element of inputs to -- output.
refSelect :: (MonadBuild m', TensorType t) => Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t)
refSelect' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t)
-- | Forwards the ref tensor `data` to the output port determined by -- pred. -- -- If pred is true, the `data` input is forwarded to -- output_true.
Otherwise, the data goes to -- output_false. -- -- See also Switch and Merge. refSwitch :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t)) refSwitch' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t)) -- | Computes rectified linear: `max(features, 0)`. relu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t relu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes rectified linear 6: `min(max(features, 0), 6)`. relu6 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t relu6' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes rectified linear 6 gradients for a Relu6 operation. relu6Grad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t relu6Grad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes rectified linear gradients for a Relu operation. reluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t reluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Given a quantized tensor described by (input, input_min, input_max), -- outputs a -- -- range that covers the actual values present in that tensor. This op is -- typically used to produce the requested_output_min and -- requested_output_max for Requantize. requantizationRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float) requantizationRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float) -- | Convert the quantized input tensor into a lower-precision -- output, using the -- -- output range specified with requested_output_min and -- requested_output_max. -- -- requantize :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) requantize' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- | Reshapes a tensor. -- -- Given tensor, this operation returns a tensor that has the -- same values as tensor with shape shape. -- -- If one component of shape is the special value -1, the size of -- that dimension is computed so that the total size remains constant. In -- particular, a shape of `[-1]` flattens into 1-D. At most one -- component of shape can be -1. -- -- If shape is 1-D or higher, then the operation returns a tensor -- with shape shape filled with the values of tensor. 
In -- this case, the number of elements implied by shape must be the -- same as the number of elements in tensor. -- -- For example: -- -- ```prettyprint # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # -- tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], -- [4, 5, 6], [7, 8, 9]] -- -- # tensor t is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor -- t has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, -- 2], [3, 3, 4, 4]] -- -- # tensor t is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, -- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor t has shape [3, -- 2, 3] # pass '[-1]' to flatten t reshape(t, [-1]) ==> [1, -- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] -- -- # -1 can also be used to infer the shape -- -- # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, -- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: -- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, -- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3]) -- ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, -- 6, 6]]] -- -- # tensor t is [7] # shape `[]` reshapes to a scalar -- reshape(t, []) ==> 7 ``` reshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t reshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t -- | Resize images to size using area interpolation. -- -- Input images can be of different types but output images are always -- float. resizeArea :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float resizeArea' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float -- | Resize images to size using bicubic interpolation. -- -- Input images can be of different types but output images are always -- float. resizeBicubic :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float resizeBicubic' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float -- | Resize images to size using bilinear interpolation. -- -- Input images can be of different types but output images are always -- float. resizeBilinear :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float resizeBilinear' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float -- | Computes the gradient of bilinear interpolation. resizeBilinearGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t resizeBilinearGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t -- | Resize images to size using nearest neighbor -- interpolation. resizeNearestNeighbor :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t resizeNearestNeighbor' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t -- | Computes the gradient of nearest neighbor interpolation. 
resizeNearestNeighborGrad :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
resizeNearestNeighborGrad' :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
-- | Update '*var' according to the adadelta scheme. -- -- accum = rho() * accum + (1 - rho()) * grad.square(); update = -- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; -- update_accum = rho() * update_accum + (1 - rho()) * update.square(); -- var -= update;
resourceApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode)
resourceApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode)
-- | Update '*var' according to the adagrad scheme. -- -- accum += grad * grad var -= lr * grad * (1 / sqrt(accum))
resourceApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode)
resourceApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode)
-- | Update '*var' according to the proximal adagrad scheme.
resourceApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode)
resourceApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode)
-- | Update '*var' according to the Adam algorithm. -- -- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- -- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - -- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + -- epsilon)
resourceApplyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode)
resourceApplyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode)
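-- The Adam update above, transcribed as plain Haskell for a single scalar
-- parameter (an illustrative sketch only; adamStep is not part of this
-- module).
--
-- > -- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t), etc.
-- > adamStep
-- >     :: Float -> Float -> Float -> Float  -- learning_rate, beta1, beta2, epsilon
-- >     -> Int                               -- step number t (from 1)
-- >     -> Float                             -- gradient g_t
-- >     -> (Float, Float, Float)             -- (variable, m_{t-1}, v_{t-1})
-- >     -> (Float, Float, Float)
-- > adamStep lr beta1 beta2 eps t g (var, m, v) =
-- >     let lrT  = lr * sqrt (1 - beta2 ^ t) / (1 - beta1 ^ t)
-- >         m'   = beta1 * m + (1 - beta1) * g
-- >         v'   = beta2 * v + (1 - beta2) * g * g
-- >         var' = var - lrT * m' / (sqrt v' + eps)
-- >     in (var', m', v')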
-- | Update '*var' according to the centered RMSProp algorithm. -- -- The centered RMSProp algorithm uses an estimate of the centered second -- moment (i.e., the variance) for normalization, as opposed to regular -- RMSProp, which uses the (uncentered) second moment. This often helps -- with training, but is slightly more expensive in terms of computation -- and memory. -- -- Note that in dense implementation of this algorithm, mg, ms, and mom -- will update even if the grad is zero, but in this sparse -- implementation, mg, ms, and mom will not update in iterations during -- which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- mean_grad = decay * mean_grad + (1-decay) * gradient -- -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - -- mean_grad ** 2) -- -- mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + -- (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / -- sqrt(ms - mg * mg + epsilon) var <- var - mom
resourceApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
resourceApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
-- | Update '*var' according to the Ftrl-proximal scheme. -- -- accum_new = accum + grad * grad linear += grad + -- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 -- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - -- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new
resourceApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
resourceApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
-- | Update '*var' by subtracting alpha * delta from it.
resourceApplyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode)
resourceApplyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode)
-- | Update '*var' according to the momentum scheme. Set use_nesterov = True if you -- -- want to use Nesterov momentum. -- -- accum = accum * momentum + grad var -= lr * accum
resourceApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
resourceApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning -- rate. -- -- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var -- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
resourceApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode)
resourceApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode)
-- | Update '*var' as FOBOS algorithm with fixed learning rate. -- -- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * -- max{|prox_v|-alpha*l1,0}
resourceApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
resourceApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode)
-- | Update '*var' according to the RMSProp algorithm. -- -- Note that in dense implementation of this algorithm, ms and mom will -- update even if the grad is zero, but in this sparse implementation, ms -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = -- learning_rate * gradient / sqrt(mean_square + epsilon) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * -- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom
resourceApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
resourceApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode)
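-- The RMSProp rule above, as a plain-Haskell step for one scalar
-- parameter (an illustrative sketch only; rmspropStep is not part of this
-- module).
--
-- > -- ms <- rho*ms + (1-rho)*grad^2; mom <- momentum*mom + lr*grad/sqrt(ms+eps); var <- var - mom
-- > rmspropStep
-- >     :: Float -> Float -> Float -> Float  -- lr, rho (decay), momentum, epsilon
-- >     -> Float                             -- grad
-- >     -> (Float, Float, Float)             -- (var, ms, mom)
-- >     -> (Float, Float, Float)
-- > rmspropStep lr rho momentum eps g (var, ms, mom) =
-- >     let ms'  = rho * ms + (1 - rho) * g * g
-- >         mom' = momentum * mom + lr * g / sqrt (ms' + eps)
-- >         var' = var - mom'
-- >     in (var', ms', mom')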
-- | Gather slices from the variable pointed to by resource -- according to indices. -- -- indices must be an integer tensor of any dimension (usually -- 0-D or 1-D). Produces an output tensor with shape `indices.shape + -- params.shape[1:]` where: -- -- ```python # Scalar indices output[:, ..., :] = params[indices, :, ... :] -- -- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] -- -- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, -- ..., j], :, ..., :] ```
resourceGather :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype)
resourceGather' :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype)
-- | Adds sparse updates to the variable referenced by resource. -- -- This operation computes -- -- # Scalar indices ref[indices, ...] += updates[...] -- -- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] -- -- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] -- += updates[i, ..., j, ...] -- -- Duplicate entries are handled correctly: if multiple indices -- reference the same location, their contributions add. -- -- Requires `updates.shape = indices.shape + ref.shape[1:]`.
resourceScatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode)
resourceScatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode)
-- | var: Should be from a Variable().
resourceSparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode)
resourceSparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode)
-- | Update relevant entries in '*var' and '*accum' according to the -- adagrad scheme. -- -- That is for rows we have grad for, we update var and accum as follows: -- accum += grad * grad var -= lr * grad * (1 / sqrt(accum))
resourceSparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode)
resourceSparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode)
-- | Update entries in '*var' and '*accum' according to the proximal -- adagrad scheme.
resourceSparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode)
resourceSparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode)
-- | Update '*var' according to the centered RMSProp algorithm.
--
-- The centered RMSProp algorithm uses an estimate of the centered second
-- moment (i.e., the variance) for normalization, as opposed to regular
-- RMSProp, which uses the (uncentered) second moment. This often helps
-- with training, but is slightly more expensive in terms of computation
-- and memory.
--
-- Note that in the dense implementation of this algorithm, mg, ms, and
-- mom will update even if the grad is zero, but in this sparse
-- implementation, mg, ms, and mom will not update in iterations during
-- which the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- mean_grad = decay * mean_grad + (1-decay) * gradient
-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
--
-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-- var <- var - mom
resourceSparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode)
resourceSparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode)
-- | Update relevant entries in '*var' according to the Ftrl-proximal
-- scheme.
--
-- That is, for the rows we have grad for, we update var, accum and
-- linear as follows:
--
-- accum_new = accum + grad * grad
-- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-- accum = accum_new
resourceSparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
resourceSparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode)
-- | Update relevant entries in '*var' and '*accum' according to the
-- momentum scheme.
--
-- Set use_nesterov = True if you want to use Nesterov momentum.
--
-- That is, for the rows we have grad for, we update var and accum as
-- follows:
--
-- accum = accum * momentum + grad
-- var -= lr * accum
resourceSparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode)
resourceSparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode)
-- | Sparse update entries in '*var' and '*accum' according to FOBOS
-- algorithm.
--
-- That is, for the rows we have grad for, we update var and accum as
-- follows:
--
-- accum += grad * grad
-- prox_v = var
-- prox_v -= lr * grad * (1 / sqrt(accum))
-- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
resourceSparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode)
resourceSparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode)
-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
--
-- That is, for the rows we have grad for, we update var as follows:
--
-- prox_v = var - alpha * grad
-- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
resourceSparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode)
resourceSparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode)
-- | Update '*var' according to the RMSProp algorithm.
--
-- Note that in the dense implementation of this algorithm, ms and mom
-- will update even if the grad is zero, but in this sparse
-- implementation, ms and mom will not update in iterations during which
-- the grad is zero.
--
-- mean_square = decay * mean_square + (1-decay) * gradient ** 2
-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
--
-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-- var <- var - mom
resourceSparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode)
resourceSparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode)
-- | Restores a tensor from checkpoint files.
--
-- Reads a tensor stored in one or several files. If there are several
-- files (for instance because a tensor was saved as slices),
-- file_pattern may contain wildcard symbols (* and
-- ?) in the filename portion only, not in the directory
-- portion.
--
-- If a file_pattern matches several files,
-- preferred_shard can be used to hint in which file the
-- requested tensor is likely to be found. This op will first open the
-- file at index preferred_shard in the list of matching files
-- and try to restore tensors from that file. Only if some tensors or
-- tensor slices are not found in that first file does the Op open all
-- the files. Setting preferred_shard to match the value passed
-- as the shard input of a matching Save Op may speed
-- up Restore. This attribute only affects performance, not correctness.
-- The default value -1 means files are processed in order.
--
-- See also RestoreSlice.
restore :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt
restore' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt
-- | Restores a tensor from checkpoint files.
--
-- This is like Restore except that the restored tensor can be
-- listed as filling only a slice of a larger tensor.
-- shape_and_slice specifies the shape of the larger tensor and
-- the slice that the restored tensor covers.
--
-- The shape_and_slice input has the same format as the elements
-- of the shapes_and_slices input of the SaveSlices op.
restoreSlice :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt
restoreSlice' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt
-- | Restores tensors from a V2 checkpoint.
--
-- For backward compatibility with the V1 format, this Op currently
-- allows restoring from a V1 checkpoint as well: - This Op first
-- attempts to find the V2 index file pointed to by "prefix", and if
-- found proceeds to read it as a V2 checkpoint; - Otherwise the V1 read
-- path is invoked. Relying on this behavior is not recommended, as the
-- ability to fall back to read V1 might be deprecated and eventually
-- removed.
--
-- By default, restores the named tensors in full. If the caller wishes
-- to restore specific slices of stored tensors, "shape_and_slices"
-- should be non-empty strings and correspondingly well-formed.
--
-- Callers must ensure all the named tensors are indeed stored in the
-- checkpoint.
restoreV2 :: (TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes
restoreV2' :: (TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes
-- | Reverses specific dimensions of a tensor.
--
-- Given a tensor, and a bool tensor dims
-- representing the dimensions of tensor, this operation
-- reverses each dimension i of tensor where `dims[i]` is
-- True.
--
-- tensor can have up to 8 dimensions. The number of dimensions
-- of tensor must equal the number of elements in dims.
-- In other words:
--
-- `rank(tensor) = size(dims)`
--
-- For example:
--
-- ```prettyprint
-- # tensor t is [[[[ 0, 1, 2, 3],
-- #                [ 4, 5, 6, 7],
-- #                [ 8, 9, 10, 11]],
-- #               [[12, 13, 14, 15],
-- #                [16, 17, 18, 19],
-- #                [20, 21, 22, 23]]]]
-- # tensor t shape is [1, 2, 3, 4]
--
-- # dims is [False, False, False, True]
-- reverse(t, dims) ==> [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [11, 10, 9, 8]],
--                        [[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]]]]
--
-- # dims is [False, True, False, False]
-- reverse(t, dims) ==> [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]],
--                        [[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]]
--
-- # dims is [False, False, True, False]
-- reverse(t, dims) ==> [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]],
--                        [[20, 21, 22, 23], [16, 17, 18, 19], [12, 13, 14, 15]]]]
-- ```
reverse :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t
reverse' :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t
-- | Reverses variable length slices.
--
-- This op first slices input along the dimension
-- batch_dim, and for each slice i, reverses the first
-- `seq_lengths[i]` elements along the dimension seq_dim.
--
-- The elements of seq_lengths must obey `seq_lengths[i] <
-- input.dims[seq_dim]`, and seq_lengths must be a vector of
-- length `input.dims[batch_dim]`.
--
-- The output slice i along dimension batch_dim is then
-- given by input slice i, with the first `seq_lengths[i]`
-- slices along dimension seq_dim reversed.
--
-- For example:
--
-- ```prettyprint
-- # Given this:
-- batch_dim = 0
-- seq_dim = 1
-- input.dims = (4, 8, ...)
-- seq_lengths = [7, 2, 3, 5]
--
-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
-- output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
-- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
-- output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
-- output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
--
-- # while entries past seq_lens are copied through:
-- output[0, 7:, :, ...] = input[0, 7:, :, ...]
-- output[1, 2:, :, ...] = input[1, 2:, :, ...]
-- output[2, 3:, :, ...] = input[2, 3:, :, ...]
-- output[3, 5:, :, ...] = input[3, 5:, :, ...]
-- ```
--
-- In contrast, if:
--
-- ```prettyprint
-- # Given this:
-- batch_dim = 2
-- seq_dim = 0
-- input.dims = (8, ?, 4, ...)
-- seq_lengths = [7, 2, 3, 5]
--
-- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
-- output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
-- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
-- output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
-- output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
--
-- # while entries past seq_lens are copied through:
-- output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
-- output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
-- output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
-- output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
-- ```
reverseSequence :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t
reverseSequence' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t
-- | Reverses specific dimensions of a tensor.
--
-- NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
-- `tf.reverse_v2` is currently an alias that will be deprecated before
-- TF 1.0.
--
-- Given a tensor, and an int32 tensor axis
-- representing the set of dimensions of tensor to reverse, this
-- operation reverses each dimension i for which there exists
-- j s.t. `axis[j] == i`.
--
-- tensor can have up to 8 dimensions. axis may specify 0
-- or more dimensions. If an index is specified more than once, an
-- InvalidArgument error is raised.
--
-- For example:
--
-- ```prettyprint
-- # tensor t is [[[[ 0, 1, 2, 3],
-- #                [ 4, 5, 6, 7],
-- #                [ 8, 9, 10, 11]],
-- #               [[12, 13, 14, 15],
-- #                [16, 17, 18, 19],
-- #                [20, 21, 22, 23]]]]
-- # tensor t shape is [1, 2, 3, 4]
--
-- # dims is '[3]' (or dims is '[-1]')
-- reverse(t, dims) ==> [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [11, 10, 9, 8]],
--                        [[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]]]]
--
-- # dims is '[1]' (or dims is '[-3]')
-- reverse(t, dims) ==> [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]],
--                        [[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]]
--
-- # dims is '[2]' (or dims is '[-2]')
-- reverse(t, dims) ==> [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]],
--                        [[20, 21, 22, 23], [16, 17, 18, 19], [12, 13, 14, 15]]]]
-- ```
reverseV2 :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
reverseV2' :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
-- | Returns element-wise integer closest to x.
--
-- If the result is midway between two representable values, the even
-- representable is chosen.
-- For example:
--
-- ```
-- rint(-1.5) ==> -2.0
-- rint(0.5000001) ==> 1.0
-- rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
-- ```
rint :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
rint' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Rounds the values of a tensor to the nearest integer, element-wise.
--
-- Rounds half to even. Also known as banker's rounding. If you want to
-- round according to the current system rounding mode use std::rint.
round :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
round' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes reciprocal of square root of x element-wise.
--
-- I.e., \(y = 1 / \sqrt{x}\).
rsqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
rsqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient for the rsqrt of x wrt its input.
--
-- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and
-- dy is the corresponding input gradient.
rsqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
rsqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Generate a single randomly distorted bounding box for an image.
--
-- Bounding box annotations are often supplied in addition to
-- ground-truth labels in image recognition or object localization tasks.
-- A common technique for training such a system is to randomly distort
-- an image while preserving its content, i.e. *data augmentation*. This
-- Op outputs a randomly distorted localization of an object, i.e.
-- bounding box, given an image_size, bounding_boxes
-- and a series of constraints.
--
-- The output of this Op is a single bounding box that may be used to
-- crop the original image. The output is returned as 3 tensors:
-- begin, size and bboxes. The first 2 tensors
-- can be fed directly into `tf.slice` to crop the image. The latter may
-- be supplied to `tf.image.draw_bounding_boxes` to visualize what the
-- bounding box looks like.
--
-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max,
-- x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]`
-- relative to the width and height of the underlying image.
--
-- For example,
--
-- ```python
-- # Generate a single distorted bounding box.
-- begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
--     tf.shape(image), bounding_boxes=bounding_boxes)
--
-- # Draw the bounding box in an image summary.
-- image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw)
-- tf.image_summary('images_with_box', image_with_box)
--
-- # Employ the bounding box to distort the image.
-- distorted_image = tf.slice(image, begin, size)
-- ```
--
-- Note that if no bounding box information is available, setting
-- `use_image_if_no_bounding_boxes = true` will assume there is a single
-- implicit bounding box covering the whole image. If
-- use_image_if_no_bounding_boxes is false and no bounding boxes
-- are supplied, an error is raised.
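--
-- A Haskell-level sketch (assuming TF.runSession and TF.run from the
-- companion tensorflow package; imageSize and boundingBoxes are
-- hypothetical tensors built by the caller):
--
-- ```haskell
-- cropParams :: TF.Session (V.Vector Int32, V.Vector Int32)
-- cropParams = do
--   -- begin and size can be fed to a slice op to crop the image
--   (begin, size, _bboxForDraw) <-
--     TF.sampleDistortedBoundingBox imageSize boundingBoxes
--   TF.run (begin, size)
-- ```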
sampleDistortedBoundingBox :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float))
sampleDistortedBoundingBox' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float))
-- | Saves the input tensors to disk.
--
-- The size of tensor_names must match the number of tensors in
-- `data`. `data[i]` is written to filename with name
-- `tensor_names[i]`.
--
-- See also SaveSlices.
save :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode)
save' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode)
-- | Saves input tensor slices to disk.
--
-- This is like Save except that tensors can be listed in the
-- saved file as being a slice of a larger tensor.
-- shapes_and_slices specifies the shape of the larger tensor
-- and the slice that this tensor covers. shapes_and_slices must
-- have as many elements as tensor_names.
--
-- Elements of the shapes_and_slices input must either be:
--
-- - The empty string, in which case the corresponding tensor is saved
--   normally.
-- - A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
--   `dimI` are the dimensions of the larger tensor and `slice-spec`
--   specifies what part is covered by the tensor to save.
--
-- `slice-spec` itself is a :-separated list:
-- `slice0:slice1:...:sliceN-1` where each sliceI is either:
--
-- - The string `-`, meaning that the slice covers all indices of this
--   dimension.
-- - `start,length` where start and length are integers. In that case
--   the slice covers length indices starting at start.
--
-- See also Save.
saveSlices :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode)
saveSlices' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode)
-- | Saves tensors in V2 checkpoint format.
--
-- By default, saves the named tensors in full. If the caller wishes to
-- save specific slices of full tensors, "shape_and_slices" should be
-- non-empty strings and correspondingly well-formed.
saveV2 :: (MonadBuild m', TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode)
saveV2' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode)
-- | Outputs a Summary protocol buffer with scalar values.
--
-- The input tags and values must have the same shape.
-- The generated summary has a summary value for each tag-value pair in
-- tags and values.
scalarSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
scalarSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
-- | Adds sparse updates to a variable reference.
--
-- This operation computes
--
-- # Scalar indices
-- ref[indices, ...] += updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] += updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
--
-- This operation outputs ref after the update is done. This
-- makes it easier to chain operations that need to use the reset value.
--
-- Duplicate entries are handled correctly: if multiple indices
-- reference the same location, their contributions add.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
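--
-- A minimal sketch (assuming TF.initializedVariable and TF.vector from
-- the companion tensorflow-ops package, and that the returned Ref
-- tensor can be fetched directly):
--
-- ```haskell
-- scatterAddDemo :: TF.Session (V.Vector Float)
-- scatterAddDemo = do
--   ref <- TF.initializedVariable (TF.vector [1, 1, 1, 1 :: Float])
--   -- add 10 at index 0 and 20 at index 2
--   out <- TF.scatterAdd ref (TF.vector [0, 2 :: Int32])
--                            (TF.vector [10, 20 :: Float])
--   TF.run out  -- expected: [11, 1, 21, 1]
-- ```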
--
-- (Figure: ScatterAdd.png)
scatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Divides a variable reference by sparse updates.
--
-- This operation computes
--
-- # Scalar indices
-- ref[indices, ...] /= updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] /= updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
--
-- This operation outputs ref after the update is done. This
-- makes it easier to chain operations that need to use the reset value.
--
-- Duplicate entries are handled correctly: if multiple indices
-- reference the same location, their contributions divide.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
scatterDiv :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterDiv' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Multiplies sparse updates into a variable reference.
--
-- This operation computes
--
-- # Scalar indices
-- ref[indices, ...] *= updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] *= updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
--
-- This operation outputs ref after the update is done. This
-- makes it easier to chain operations that need to use the reset value.
--
-- Duplicate entries are handled correctly: if multiple indices
-- reference the same location, their contributions multiply.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
scatterMul :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterMul' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Creates a new tensor by applying sparse updates to individual
-- values or slices within a zero tensor of the given shape tensor
-- according to indices. This operator is the inverse of the
-- tf.gather_nd operator which extracts values or slices from a
-- given tensor.
--
-- TODO(simister): Add a link to Variable.getitem documentation on
-- slice syntax.
--
-- shape is a TensorShape with rank P and
-- indices is a Tensor of rank Q.
--
-- indices must be an integer tensor, containing indices into
-- shape. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 <
-- K <= P`.
--
-- The innermost dimension of indices (with length K)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- < P`) along the Kth dimension of shape.
--
-- updates is a Tensor of rank `Q-1+P-K` with shape:
--
-- ```
-- [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
-- ```
--
-- The simplest form of scatter is to insert individual elements in a
-- tensor by index. For example, say we want to insert 4 scattered
-- elements in a rank-1 tensor with 8 elements.
--
-- (Figure: ScatterNd1.png)
--
-- In Python, this scatter operation would look like this:
--
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- shape = tf.constant([8])
-- scatter = tf.scatter_nd(indices, updates, shape)
-- with tf.Session() as sess:
--   print sess.run(scatter)
--
-- The resulting tensor would look like this:
--
-- [0, 11, 0, 10, 9, 0, 0, 12]
--
-- We can also insert entire slices of a higher rank tensor all at once.
-- For example, if we wanted to insert two slices in the first dimension
-- of a rank-3 tensor with two matrices of new values.
--
-- (Figure: ScatterNd2.png)
--
-- In Python, this scatter operation would look like this:
--
-- indices = tf.constant([[0], [2]])
-- updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
--                        [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]])
-- shape = tf.constant([4, 4, 4])
-- scatter = tf.scatter_nd(indices, updates, shape)
-- with tf.Session() as sess:
--   print sess.run(scatter)
--
-- The resulting tensor would look like this:
--
-- [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
--  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
--  [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
--  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
scatterNd :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t
scatterNd' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t
-- | Applies sparse addition between updates and individual values
-- or slices within a given variable according to indices.
--
-- ref is a Tensor with rank P and
-- indices is a Tensor of rank Q.
--
-- indices must be an integer tensor, containing indices into
-- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 <
-- K <= P`.
--
-- The innermost dimension of indices (with length K)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- < P`) along the Kth dimension of ref.
--
-- updates is a Tensor of rank `Q-1+P-K` with shape:
--
-- ```
-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-- ```
--
-- For example, say we want to add 4 scattered elements to a rank-1
-- tensor with 8 elements. In Python, that addition would look like this:
--
-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- add = tf.scatter_nd_add(ref, indices, updates)
-- with tf.Session() as sess:
--   print sess.run(add)
--
-- The resulting update to ref would look like this:
--
-- [1, 13, 3, 14, 14, 6, 7, 20]
--
-- See tf.scatter_nd for more details about how to make updates to
-- slices.
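--
-- The same example as a Haskell sketch (assuming TF.initializedVariable,
-- TF.vector and TF.constant from the companion tensorflow-ops package):
--
-- ```haskell
-- ndAddDemo :: TF.Session (V.Vector Int32)
-- ndAddDemo = do
--   ref <- TF.initializedVariable (TF.vector [1, 2, 3, 4, 5, 6, 7, 8 :: Int32])
--   out <- TF.scatterNdAdd ref
--            (TF.constant (TF.Shape [4, 1]) [4, 3, 1, 7 :: Int32])  -- indices
--            (TF.vector [9, 10, 11, 12 :: Int32])                   -- updates
--   TF.run out  -- expected: [1, 13, 3, 14, 14, 6, 7, 20]
-- ```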
scatterNdAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterNdAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Applies sparse subtraction between updates and individual
-- values or slices within a given variable according to indices.
--
-- ref is a Tensor with rank P and
-- indices is a Tensor of rank Q.
--
-- indices must be an integer tensor, containing indices into
-- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 <
-- K <= P`.
--
-- The innermost dimension of indices (with length K)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- < P`) along the Kth dimension of ref.
--
-- updates is a Tensor of rank `Q-1+P-K` with shape:
--
-- ```
-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-- ```
--
-- For example, say we want to subtract 4 scattered elements from a
-- rank-1 tensor with 8 elements. In Python, that subtraction would look
-- like this:
--
-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- sub = tf.scatter_nd_sub(ref, indices, updates)
-- with tf.Session() as sess:
--   print sess.run(sub)
--
-- The resulting update to ref would look like this:
--
-- [1, -9, 3, -6, -4, 6, 7, -4]
--
-- See tf.scatter_nd for more details about how to make updates to
-- slices.
scatterNdSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterNdSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Applies sparse updates to individual values or slices within
-- a given variable according to indices.
--
-- ref is a Tensor with rank P and
-- indices is a Tensor of rank Q.
--
-- indices must be an integer tensor, containing indices into
-- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 <
-- K <= P`.
--
-- The innermost dimension of indices (with length K)
-- corresponds to indices into elements (if `K = P`) or slices (if `K
-- < P`) along the Kth dimension of ref.
--
-- updates is a Tensor of rank `Q-1+P-K` with shape:
--
-- ```
-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-- ```
--
-- For example, say we want to update 4 scattered elements in a rank-1
-- tensor with 8 elements. In Python, that update would look like this:
--
-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-- indices = tf.constant([[4], [3], [1], [7]])
-- updates = tf.constant([9, 10, 11, 12])
-- update = tf.scatter_nd_update(ref, indices, updates)
-- with tf.Session() as sess:
--   print sess.run(update)
--
-- The resulting update to ref would look like this:
--
-- [1, 11, 3, 10, 9, 6, 7, 12]
--
-- See tf.scatter_nd for more details about how to make updates to
-- slices.
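--
-- A Haskell sketch mirroring the Python example above (helper names
-- assumed from the companion tensorflow-ops package):
--
-- ```haskell
-- ndUpdateDemo :: TF.Session (V.Vector Int32)
-- ndUpdateDemo = do
--   ref <- TF.initializedVariable (TF.vector [1, 2, 3, 4, 5, 6, 7, 8 :: Int32])
--   out <- TF.scatterNdUpdate ref
--            (TF.constant (TF.Shape [4, 1]) [4, 3, 1, 7 :: Int32])  -- indices
--            (TF.vector [9, 10, 11, 12 :: Int32])                   -- updates
--   TF.run out  -- expected: [1, 11, 3, 10, 9, 6, 7, 12]
-- ```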
scatterNdUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterNdUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Subtracts sparse updates from a variable reference.
--
-- # Scalar indices
-- ref[indices, ...] -= updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] -= updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
--
-- This operation outputs ref after the update is done. This
-- makes it easier to chain operations that need to use the reset value.
--
-- Duplicate entries are handled correctly: if multiple indices
-- reference the same location, their (negated) contributions add.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--
-- (Figure: ScatterSub.png)
scatterSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Applies sparse updates to a variable reference.
--
-- This operation computes
--
-- # Scalar indices
-- ref[indices, ...] = updates[...]
--
-- # Vector indices (for each i)
-- ref[indices[i], ...] = updates[i, ...]
--
-- # High rank indices (for each i, ..., j)
-- ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
--
-- This operation outputs ref after the update is done. This
-- makes it easier to chain operations that need to use the reset value.
--
-- If values in ref are to be updated more than once, because
-- there are duplicate entries in indices, the order at which
-- the updates happen for each value is undefined.
--
-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--
-- (Figure: ScatterUpdate.png)
scatterUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
scatterUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
-- | Computes fingerprints of the input strings.
sdcaFprint :: Tensor v'1 ByteString -> Tensor Build Int64
sdcaFprint' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Int64
-- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA)
-- optimizer for linear models with L1 + L2 regularization. As the global
-- optimization objective is strongly convex, the optimizer optimizes the
-- dual objective at each step. The optimizer applies each update one
-- example at a time. Examples are sampled uniformly, and the optimizer is
-- learning-rate free and enjoys linear convergence rate.
--
-- Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai;
-- Zhang, Tong.
-- 2012. http://arxiv.org/pdf/1211.2717v1.pdf
--
-- Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|
--
-- Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin
-- Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik,
-- Martin Takac. http://arxiv.org/abs/1502.03508
--
-- Stochastic Dual Coordinate Ascent with Adaptive Probabilities. Dominik
-- Csiba, Zheng Qu, Peter Richtarik. https://arxiv.org/abs/1502.08053
sdcaOptimizer :: Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])
sdcaOptimizer' :: OpParams -> Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])
-- | Applies L1 regularization shrink step on the parameters.
sdcaShrinkL1 :: (MonadBuild m') => Float -> Float -> [Tensor Ref Float] -> m' (ControlNode)
sdcaShrinkL1' :: (MonadBuild m') => OpParams -> Float -> Float -> [Tensor Ref Float] -> m' (ControlNode)
-- | Computes the maximum along segments of a tensor.
--
-- Read the section on Segmentation for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \max_j(data_j)\) where
-- max is over j such that `segment_ids[j] == i`.
--
-- (Figure: SegmentMax.png)
segmentMax :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentMax' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the mean along segments of a tensor.
--
-- Read the section on Segmentation for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \frac{\sum_j data_j}{N}\)
-- where mean is over j such that `segment_ids[j] == i` and
-- N is the total number of values summed.
--
-- (Figure: SegmentMean.png)
segmentMean :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentMean' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the minimum along segments of a tensor.
--
-- Read the section on Segmentation for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \min_j(data_j)\) where
-- min is over j such that `segment_ids[j] == i`.
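--
-- A minimal sketch (TF.run and TF.constant assumed from the companion
-- tensorflow packages):
--
-- ```haskell
-- segMins :: TF.Session (V.Vector Float)
-- segMins = TF.run $ TF.segmentMin
--   (TF.constant (TF.Shape [4]) [3, 1, 4, 1 :: Float])  -- data
--   (TF.constant (TF.Shape [4]) [0, 0, 1, 1 :: Int32])  -- segment_ids
-- -- expected: [1, 1]
-- ```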
--
-- (Figure: SegmentMin.png)
segmentMin :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentMin' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the product along segments of a tensor.
--
-- Read the section on Segmentation for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \prod_j data_j\) where the
-- product is over j such that `segment_ids[j] == i`.
--
-- (Figure: SegmentProd.png)
segmentProd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentProd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Computes the sum along segments of a tensor.
--
-- Read the section on Segmentation for an explanation of
-- segments.
--
-- Computes a tensor such that \(output_i = \sum_j data_j\) where sum is
-- over j such that `segment_ids[j] == i`.
--
-- (Figure: SegmentSum.png)
segmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
segmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
-- | Selects elements from t or e, depending on
-- condition.
--
-- The t and e tensors must all have the same shape,
-- and the output will also have that shape.
--
-- The condition tensor must be a scalar if t and
-- e are scalars. If t and e are vectors or
-- higher rank, then condition must be either a scalar, a vector
-- with size matching the first dimension of t, or must have the
-- same shape as t.
--
-- The condition tensor acts as a mask that chooses, based on
-- the value at each element, whether the corresponding element / row in
-- the output should be taken from t (if true) or e (if
-- false).
--
-- If condition is a vector and t and e are
-- higher rank matrices, then it chooses which row (outer dimension) to
-- copy from t and e. If condition has the
-- same shape as t and e, then it chooses which element
-- to copy from t and e.
--
-- For example:
--
-- ```prettyprint
-- # condition tensor is [[True, False]
-- #                      [False, True]]
-- # t is [[1, 2],
-- #       [3, 4]]
-- # e is [[5, 6],
-- #       [7, 8]]
-- select(condition, t, e) ==> [[1, 6],
--                              [7, 4]]
--
-- # condition tensor is [True, False]
-- # t is [[1, 2],
-- #       [3, 4]]
-- # e is [[5, 6],
-- #       [7, 8]]
-- select(condition, t, e) ==> [[1, 2],
--                              [7, 8]]
-- ```
select :: (TensorType t) => Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
select' :: (TensorType t) => OpParams -> Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
-- | Computes the Eigen Decomposition of a batch of square self-adjoint
-- matrices.
--
-- The input is a tensor of shape `[..., M, M]` whose inner-most 2
-- dimensions form square matrices, with the same constraints as the
-- single matrix SelfAdjointEig.
--
-- The result is a [..., M+1, M] matrix with [..., 0,:] containing the
-- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
selfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t
selfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the eigen decomposition of one or more square self-adjoint
-- matrices.
--
-- Computes the eigenvalues and (optionally) eigenvectors of each inner
-- matrix in input such that `input[..., :, :] = v[..., :, :] *
-- diag(e[..., :])`.
--
-- ```prettyprint
-- # a is a tensor.
-- # e is a tensor of eigenvalues.
-- # v is a tensor of eigenvectors.
-- e, v = self_adjoint_eig(a)
-- e = self_adjoint_eig(a, compute_v=False)
-- ```
selfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t)
selfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t)
-- | Serialize an N-minibatch SparseTensor into an `[N,
-- 3]` string Tensor.
--
-- The SparseTensor must have rank R greater than 1,
-- and the first dimension is treated as the minibatch dimension.
-- Elements of the SparseTensor must be sorted in increasing
-- order of this first dimension. The serialized SparseTensor
-- objects going into each row of serialized_sparse will have
-- rank `R-1`.
--
-- The minibatch size N is extracted from `sparse_shape[0]`.
serializeManySparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
serializeManySparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
-- | Serialize a SparseTensor into a string 3-vector (1-D
-- Tensor) object.
serializeSparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
serializeSparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
-- | Number of unique elements along last dimension of input set.
--
-- Input set is a SparseTensor represented by
-- set_indices, set_values, and set_shape. The
-- last dimension contains values in a set, duplicates are allowed but
-- ignored.
--
-- If validate_indices is True, this op validates the
-- order and range of set indices.
setSize :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32
setSize' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32
-- | Returns the shape of a tensor.
--
-- This operation returns a 1-D integer tensor representing the shape of
-- input.
--
-- For example:
--
-- ```prettyprint
-- # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-- shape(t) ==> [2, 2, 3]
-- ```
shape :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type
shape' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type
-- | Returns shape of tensors.
--
-- This operation returns N 1-D integer tensors representing the shape of
-- `input[i]s`.
shapeN :: (TensorType t, OneOf '[Int32, Int64] out_type) => [Tensor v'1 t] -> [Tensor Build out_type]
shapeN' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> [Tensor v'1 t] -> [Tensor Build out_type]
-- | Generate a sharded filename. The filename is printf formatted as
--
-- %s-%05d-of-%05d, basename, shard, num_shards.
shardedFilename :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString
shardedFilename' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString
-- | Generate a glob pattern matching all sharded file names.
shardedFilespec :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
shardedFilespec' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
-- | Computes sigmoid of x element-wise.
--
-- Specifically, `y = 1 / (1 + exp(-x))`.
sigmoid :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
sigmoid' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes the gradient of the sigmoid of x wrt its input.
--
-- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
-- dy is the corresponding input gradient.
sigmoidGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
sigmoidGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Returns an element-wise indication of the sign of a number.
--
-- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
--
-- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y
-- = 0`.
sign :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
sign' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes sin of x element-wise.
sin :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
sin' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Returns the size of a tensor.
--
-- This operation returns an integer representing the number of elements
-- in input.
--
-- For example:
--
-- ```prettyprint
-- # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-- size(t) ==> 12
-- ```
size :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type
size' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type
-- | Parses a text file and creates a batch of examples.
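--
-- A sketch of fetching one batch (the Int64 argument is the batch
-- size; the input filename is presumably set through OpParams on
-- skipgram', e.g. via opAttr and the lens setter from Lens.Family2;
-- this is an assumption based on the upstream Skipgram op, not part of
-- the generated docs):
--
-- ```haskell
-- wordBatch :: TF.Session (V.Vector Int32, V.Vector Int32)
-- wordBatch = do
--   (_vocabWords, _freqs, _wordsPerEpoch, _epoch, _totalWords, examples, labels)
--     <- TF.skipgram' (TF.opAttr "filename" .~ ("corpus.txt" :: ByteString)) 128
--   TF.run (examples, labels)
-- ```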
skipgram :: (MonadBuild m') => Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32))
skipgram' :: (MonadBuild m') => OpParams -> Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32))
-- | Return a slice from input.
--
-- The output tensor is a tensor with dimensions described by size
-- whose values are extracted from input starting at the offsets
-- in begin.
--
-- *Requirements*: 0 <= begin[i] <= begin[i] + size[i] <= Di
-- for i in [0, n)
slice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t
slice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t
-- | Computes softmax activations.
--
-- For each batch i and class j we have
--
-- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
softmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t
softmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes softmax cross entropy cost and gradients to backpropagate.
--
-- Inputs are the logits, not probabilities.
softmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
softmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
-- | Computes softplus: `log(exp(features) + 1)`.
softplus :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
softplus' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes softplus gradients for a softplus operation.
softplusGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
softplusGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | Computes softsign: `features / (abs(features) + 1)`.
softsign :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
softsign' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
-- | Computes softsign gradients for a softsign operation.
softsignGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
softsignGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
-- | SpaceToBatch for 4-D tensors of type T.
--
-- This is a legacy version of the more general SpaceToBatchND.
--
-- Zero-pads and then rearranges (permutes) blocks of spatial data into
-- batch. More specifically, this op outputs a copy of the input tensor
-- where values from the height and width dimensions
-- are moved to the batch dimension. After the zero-padding,
-- both height and width of the input must be divisible
-- by the block size.
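--
-- A small sketch (TF.run and TF.constant assumed from the companion
-- packages): a 2x2 spatial block with block size 2 and zero paddings
-- moves every pixel into the batch dimension.
--
-- ```haskell
-- toBatch :: TF.Session (V.Vector Float)
-- toBatch = TF.run $ TF.spaceToBatch 2
--   (TF.constant (TF.Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float])
--   (TF.constant (TF.Shape [2, 2]) [0, 0, 0, 0 :: Int32])  -- no padding
-- -- expected output shape [4, 1, 1, 1], values [1, 2, 3, 4]
-- ```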
spaceToBatch :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
spaceToBatch' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
-- | SpaceToBatch for N-D tensors of type T.
--
-- This operation divides "spatial" dimensions `[1, ..., M]` of the input
-- into a grid of blocks of shape block_shape, and interleaves
-- these blocks with the "batch" dimension (0) such that in the output,
-- the spatial dimensions `[1, ..., M]` correspond to the position within
-- the grid, and the batch dimension combines both the position within a
-- spatial block and the original batch position. Prior to division into
-- blocks, the spatial dimensions of the input are optionally zero padded
-- according to paddings. See below for a precise description.
spaceToBatchND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t
spaceToBatchND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t
-- | SpaceToDepth for tensors of type T.
--
-- Rearranges blocks of spatial data into depth. More specifically, this
-- op outputs a copy of the input tensor where values from the
-- height and width dimensions are moved to the
-- depth dimension. The attr block_size indicates the
-- input block size and how the data is moved:
--
-- - Non-overlapping blocks of size `block_size x block_size` are
--   rearranged into depth at each location.
-- - The depth of the output tensor is
--   `input_depth * block_size * block_size`.
-- - The input tensor's height and width must be divisible by
--   block_size.
--
-- That is, assuming the input is in the shape: `[batch, height, width,
-- depth]`, the shape of the output will be: `[batch,
-- height/block_size, width/block_size,
-- depth*block_size*block_size]`
--
-- This operation requires that the input tensor be of rank 4, and that
-- block_size be >=1 and a divisor of both the input
-- height and width.
--
-- This operation is useful for resizing the activations between
-- convolutions (but keeping all data), e.g. instead of pooling. It is
-- also useful for training purely convolutional models.
--
-- For example, given this input of shape `[1, 2, 2, 1]`, and block_size
-- of 2:
--
-- ```prettyprint
-- x = [[[[1], [2]], [[3], [4]]]]
-- ```
--
-- This operation will output a tensor of shape `[1, 1, 1, 4]`:
--
-- ```prettyprint
-- [[[[1, 2, 3, 4]]]]
-- ```
--
-- Here, the input has a batch of 1 and each batch element has shape `[2,
-- 2, 1]`, the corresponding output will have a single element (i.e.
-- width and height are both 1) and will have a depth of 4 channels (1 *
-- block_size * block_size). The output element shape is `[1, 1, 4]`.
--
-- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`,
-- e.g.
--
-- ```prettyprint
-- x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
-- ```
--
-- This operation, for block_size of 2, will return the following tensor
-- of shape `[1, 1, 1, 12]`
--
-- ```prettyprint
-- [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
-- ```
--
-- Similarly, for the following input of shape `[1 4 4 1]`, and a block
-- size of 2:
--
-- ```prettyprint
-- x = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]], [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
-- ```
--
-- the operator will return the following tensor of shape `[1 2 2 4]`:
--
-- ```prettyprint
-- x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]]
-- ```
spaceToDepth :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t
spaceToDepth' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t
-- | Applies a sparse gradient to a given accumulator. Does not add if
-- local_step is less than the accumulator's global_step.
sparseAccumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode)
sparseAccumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode)
-- | Extracts the average sparse gradient in the given
-- SparseConditionalAccumulator, provided that sufficient (i.e., more
-- than num_required) gradients have been accumulated. The op blocks
-- until sufficient gradients have been accumulated. If the accumulator
-- has already aggregated more than num_required gradients, it returns
-- the average of the accumulated gradients. Also automatically
-- increments the recorded global_step in the accumulator by 1, and
-- resets the aggregate to 0.
sparseAccumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64))
sparseAccumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64))
-- | Adds two SparseTensor objects to produce another
-- SparseTensor.
--
-- The input SparseTensor objects' indices are assumed ordered
-- in standard lexicographic order. If this is not the case, before this
-- step run SparseReorder to restore index ordering.
--
-- By default, if two values sum to zero at some index, the output
-- SparseTensor would still include that particular location in
-- its index, storing a zero in the corresponding value slot. To override
-- this, callers can specify thresh, indicating that if the sum
-- has a magnitude strictly smaller than thresh, its
-- corresponding value and index would then not be included. In
-- particular, `thresh == 0` (default) means everything is kept and
-- actual thresholding happens only for a positive value.
--
-- In the following shapes, nnz is the count after taking
-- thresh into account.
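--
-- A sketch adding two single-entry 1-D SparseTensors of dense shape
-- [3] (helper names TF.constant, TF.vector and TF.scalar are
-- assumptions from the companion tensorflow-ops package):
--
-- ```haskell
-- spSum :: TF.Session (V.Vector Int64, V.Vector Float, V.Vector Int64)
-- spSum = TF.run $ TF.sparseAdd
--   (TF.constant (TF.Shape [1, 1]) [0 :: Int64])  -- a_indices
--   (TF.vector [1 :: Float])                      -- a_values
--   (TF.vector [3 :: Int64])                      -- a_shape
--   (TF.constant (TF.Shape [1, 1]) [2 :: Int64])  -- b_indices
--   (TF.vector [2 :: Float])                      -- b_values
--   (TF.vector [3 :: Int64])                      -- b_shape
--   (TF.scalar (0 :: Float))                      -- thresh
-- -- expected: indices [[0], [2]], values [1, 2], shape [3]
-- ```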
sparseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) sparseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -- | The gradient operator for the SparseAdd op. -- -- The SparseAdd op calculates A + B, where A, B, and the sum are all -- represented as SparseTensor objects. This op takes in the -- upstream gradient w.r.t. non-empty values of the sum, and outputs the -- gradients w.r.t. the non-empty values of A and B. sparseAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t) sparseAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t) -- | var: Should be from a Variable(). sparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t) sparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t) -- | Update relevant entries in '*var' and '*accum' according to the -- adagrad scheme. -- -- That is, for rows we have grad for, we update var and accum as follows: -- accum += grad * grad; var -= lr * grad * (1 / sqrt(accum)) sparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t) sparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t) -- | Update entries in '*var' and '*accum' according to the proximal -- adagrad scheme.
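--
-- A minimal usage sketch (an addition, not part of the upstream op
-- documentation; it assumes initializedVariable, constant, scalar and
-- vector from TensorFlow.Ops and runSession/run from
-- TensorFlow.Session). The arguments are var, the gradient
-- accumulator, the squared-gradient accumulator, grad, indices, lr,
-- l1, l2 and global_step:
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int32, Int64)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.GenOps.Core as TF
-- > import qualified TensorFlow.Ops as TF
-- >     (constant, initializedVariable, scalar, vector)
-- > import qualified TensorFlow.Session as TF
-- > import qualified TensorFlow.Types as TF (Shape (..))
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >   var   <- TF.initializedVariable (TF.vector [1, 1, 1 :: Float])
-- >   accum <- TF.initializedVariable (TF.vector [0, 0, 0 :: Float])
-- >   sqAcc <- TF.initializedVariable (TF.vector [0, 0, 0 :: Float])
-- >   let grad = TF.constant (TF.Shape [2]) [0.5, 0.5 :: Float]
-- >       idxs = TF.vector [0, 2 :: Int32]  -- update rows 0 and 2 only
-- >   updated <- TF.sparseApplyAdagradDA var accum sqAcc grad idxs
-- >                (TF.scalar 0.1) (TF.scalar 0) (TF.scalar 0)
-- >                (TF.scalar (1 :: Int64))
-- >   v <- TF.run updated
-- >   liftIO $ print (v :: V.Vector Float)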
sparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t) sparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t) -- | Update '*var' according to the centered RMSProp algorithm. -- -- The centered RMSProp algorithm uses an estimate of the centered second -- moment (i.e., the variance) for normalization, as opposed to regular -- RMSProp, which uses the (uncentered) second moment. This often helps -- with training, but is slightly more expensive in terms of computation -- and memory. -- -- Note that in dense implementation of this algorithm, mg, ms, and mom -- will update even if the grad is zero, but in this sparse -- implementation, mg, ms, and mom will not update in iterations during -- which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2; -- mean_grad = decay * mean_grad + (1-decay) * gradient; -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad; -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon); -- var <- var - mom sparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t) sparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t) -- | Update relevant entries in '*var' according to the Ftrl-proximal -- scheme.
-- -- That is, for rows we have grad for, we update var, accum and linear as -- follows: accum_new = accum + grad * grad; linear += grad + -- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var; quadratic = 1.0 -- / (accum_new^(lr_power) * lr) + 2 * l2; var = (sign(linear) * l1 - -- linear) / quadratic if |linear| > l1 else 0.0; accum = accum_new sparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) sparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) -- | Update relevant entries in '*var' and '*accum' according to the -- momentum scheme. -- -- Set use_nesterov = True if you want to use Nesterov momentum. -- -- That is, for rows we have grad for, we update var and accum as follows: -- -- accum = accum * momentum + grad; var -= lr * accum sparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t) sparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t) -- | Sparse update entries in '*var' and '*accum' according to FOBOS -- algorithm. -- -- That is, for rows we have grad for, we update var and accum as follows: -- accum += grad * grad; prox_v = var; prox_v -= lr * grad * (1 / -- sqrt(accum)); var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} sparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t) sparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t) -- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
-- -- That is, for rows we have grad for, we update var as follows: prox_v = -- var - alpha * grad; var = sign(prox_v)/(1+alpha*l2) * -- max{|prox_v|-alpha*l1,0} sparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t) sparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t) -- | Update '*var' according to the RMSProp algorithm. -- -- Note that in dense implementation of this algorithm, ms and mom will -- update even if the grad is zero, but in this sparse implementation, ms -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2; Delta = -- learning_rate * gradient / sqrt(mean_square + epsilon) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad; mom <- momentum * -- mom_{t-1} + lr * grad / sqrt(ms + epsilon); var <- var - mom sparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t) sparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t) -- | Concatenates a list of SparseTensor along the specified -- dimension. -- -- Concatenation is with respect to the dense versions of these sparse -- tensors. It is assumed that each input is a SparseTensor -- whose elements are ordered along increasing dimension number. -- -- All inputs' shapes must match, except for the concat dimension. The -- indices, values, and shapes lists must have -- the same length. -- -- The output shape is identical to the inputs', except along the concat -- dimension, where it is the sum of the inputs' sizes along that -- dimension. -- -- The output elements will be resorted to preserve the sort order along -- increasing dimension number. -- -- This op runs in `O(M log M)` time, where M is the total -- number of non-empty values across all inputs. This is due to the need -- for an internal sort in order to concatenate efficiently across an -- arbitrary dimension.
-- -- For example, if `concat_dim = 1` and the inputs are -- -- sp_inputs[0]: shape = [2, 3] [0, 2]: "a" [1, 0]: "b" [1, 1]: "c" -- -- sp_inputs[1]: shape = [2, 4] [0, 1]: "d" [0, 2]: "e" -- -- then the output will be -- -- shape = [2, 7] [0, 2]: "a" [0, 4]: "d" [0, 5]: "e" [1, 0]: "b" [1, 1]: -- "c" sparseConcat :: (TensorType t) => Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) sparseConcat' :: (TensorType t) => OpParams -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -- | A conditional accumulator for aggregating sparse gradients. The -- accumulator -- -- accepts gradients marked with local_step greater than or equal to the most -- recent global_step known to the accumulator. The average can be -- extracted from the accumulator, provided sufficient gradients have -- been accumulated. Extracting the average automatically resets the -- aggregate to 0, and increments the global_step recorded by the -- accumulator. sparseConditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString) sparseConditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString) -- | Adds up a SparseTensor and a dense Tensor, using these special rules: -- --
    --
  1. Broadcasts the dense side to have the same shape as the sparse -- side, if eligible;
  2. Then, only the dense values pointed to by the indices of the -- SparseTensor participate in the cwise addition.
-- -- By these rules, the result is a logical SparseTensor with exactly the -- same indices and shape, but possibly with different non-zero values. -- The output of this Op is the resultant non-zero values. sparseDenseCwiseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t sparseDenseCwiseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -- | Component-wise divides a SparseTensor by a dense Tensor. -- -- sparseDenseCwiseDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t sparseDenseCwiseDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -- | Component-wise multiplies a SparseTensor by a dense Tensor. -- -- The output locations corresponding to the implicitly zero elements in -- the sparse tensor will be zero (i.e., will not take up storage space), -- regardless of the contents of the dense tensor (even if it's +/-INF, -- despite the fact that INF*0 == NaN). -- -- sparseDenseCwiseMul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t sparseDenseCwiseMul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -- | Multiply matrix "a" by matrix "b". -- -- The inputs must be two-dimensional matrices and the inner dimension of -- "a" must match the outer dimension of "b". This op is optimized for -- the case where at least one of "a" or "b" is sparse. The breakeven for -- using this versus a dense matrix multiply on one platform was 30% zero -- values in the sparse matrix. sparseMatMul :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float sparseMatMul' :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => OpParams -> Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float -- | Computes the sum of elements across dimensions of a SparseTensor. -- -- This Op takes a SparseTensor and is the sparse counterpart to -- `tf.reduce_sum()`. In particular, this Op also returns a dense -- Tensor instead of a sparse one. -- -- Reduces sp_input along the dimensions given in -- reduction_axes. Unless keep_dims is true, the rank -- of the tensor is reduced by 1 for each entry in -- reduction_axes. If keep_dims is true, the reduced -- dimensions are retained with length 1. -- -- If reduction_axes has no entries, all dimensions are reduced, -- and a tensor with a single element is returned. Additionally, the axes -- can be negative; they are interpreted according to the indexing rules -- in Python.
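--
-- A minimal usage sketch (an addition, not part of the upstream op
-- documentation; it assumes constant and vector from TensorFlow.Ops
-- and runSession/run from TensorFlow.Session): sum a 2x3 sparse
-- matrix with entries [0,0]=1 and [1,1]=2 along axis 1, yielding the
-- dense vector [1, 2].
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int32, Int64)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.GenOps.Core as TF
-- > import qualified TensorFlow.Ops as TF (constant, vector)
-- > import qualified TensorFlow.Session as TF
-- > import qualified TensorFlow.Types as TF (Shape (..))
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >   let ix   = TF.constant (TF.Shape [2, 2]) [0, 0, 1, 1 :: Int64]
-- >       vals = TF.vector [1, 2 :: Float]
-- >       sh   = TF.vector [2, 3 :: Int64]
-- >       axes = TF.vector [1 :: Int32]
-- >   v <- TF.run (TF.sparseReduceSum ix vals sh axes)
-- >   liftIO $ print (v :: V.Vector Float)  -- [1.0,2.0]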
sparseReduceSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t sparseReduceSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t -- | Computes the sum of elements across dimensions of a SparseTensor. -- -- This Op takes a SparseTensor and is the sparse counterpart to -- `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a -- SparseTensor. -- -- Reduces sp_input along the dimensions given in -- reduction_axes. Unless keep_dims is true, the rank -- of the tensor is reduced by 1 for each entry in -- reduction_axes. If keep_dims is true, the reduced -- dimensions are retained with length 1. -- -- If reduction_axes has no entries, all dimensions are reduced, -- and a tensor with a single element is returned. Additionally, the axes -- can be negative; they are interpreted according to the indexing rules -- in Python. sparseReduceSumSparse :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) sparseReduceSumSparse' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -- | Reorders a SparseTensor into the canonical, row-major ordering. -- -- Note that by convention, all sparse ops preserve the canonical -- ordering along increasing dimension number. The only time ordering can -- be violated is during manual manipulation of the indices and values -- vectors to add entries. -- -- Reordering does not affect the shape of the SparseTensor. -- -- If the tensor has rank R and N non-empty values, -- input_indices has shape `[N, R]`, input_values has length -- N, and input_shape has length R. sparseReorder :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t) sparseReorder' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t) -- | Reshapes a SparseTensor to represent values in a new dense shape. -- -- This operation has the same semantics as reshape on the represented -- dense tensor. The input_indices are recomputed based on the -- requested new_shape. -- -- If one component of new_shape is the special value -1, the -- size of that dimension is computed so that the total dense size -- remains constant. At most one component of new_shape can be -- -1. The number of dense elements implied by new_shape must be -- the same as the number of dense elements originally implied by -- input_shape. -- -- Reshaping does not affect the order of values in the SparseTensor. -- -- If the input tensor has rank R_in and N non-empty -- values, and new_shape has length R_out, then -- input_indices has shape `[N, R_in]`, input_shape has -- length R_in, output_indices has shape `[N, R_out]`, -- and output_shape has length R_out.
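--
-- A minimal usage sketch (an addition, not part of the upstream op
-- documentation; it assumes constant and vector from TensorFlow.Ops
-- and runSession/run from TensorFlow.Session): reshape a sparse [2,3]
-- tensor with entries at [0,0] and [1,2] into dense shape [6].
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int64)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.GenOps.Core as TF
-- > import qualified TensorFlow.Ops as TF (constant, vector)
-- > import qualified TensorFlow.Session as TF
-- > import qualified TensorFlow.Types as TF (Shape (..))
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >   let ix    = TF.constant (TF.Shape [2, 2]) [0, 0, 1, 2 :: Int64]
-- >       inSh  = TF.vector [2, 3 :: Int64]
-- >       newSh = TF.vector [6 :: Int64]
-- >       (outIx, outSh) = TF.sparseReshape ix inSh newSh
-- >   (ixV, shV) <- TF.run (outIx, outSh)
-- >   -- Linear positions 0 and 5 in the reshaped [6] vector.
-- >   liftIO $ print (ixV :: V.Vector Int64, shV :: V.Vector Int64)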
sparseReshape :: Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64) sparseReshape' :: OpParams -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64) -- | Computes the mean along sparse segments of a tensor. -- -- Read the section on Segmentation for an explanation of -- segments. -- -- Like SegmentMean, but segment_ids can have rank less -- than `data`'s first dimension, selecting a subset of dimension 0, -- specified by indices. sparseSegmentMean :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t sparseSegmentMean' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t -- | Computes gradients for SparseSegmentMean. -- -- Returns tensor "output" with same shape as grad, except for dimension -- 0 whose value is output_dim0. sparseSegmentMeanGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t sparseSegmentMeanGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t -- | Computes the sum along sparse segments of a tensor divided by the sqrt -- of N. -- -- N is the size of the segment being reduced. -- -- Read the section on Segmentation for an explanation of -- segments. sparseSegmentSqrtN :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t sparseSegmentSqrtN' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t -- | Computes gradients for SparseSegmentSqrtN. -- -- Returns tensor "output" with same shape as grad, except for dimension -- 0 whose value is output_dim0. sparseSegmentSqrtNGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t sparseSegmentSqrtNGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t -- | Computes the sum along sparse segments of a tensor. -- -- Read the section on Segmentation for an explanation of -- segments. -- -- Like SegmentSum, but segment_ids can have rank less -- than `data`'s first dimension, selecting a subset of dimension 0, -- specified by indices. -- -- For example: -- -- ```prettyprint c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) -- -- # Select two rows, one segment. tf.sparse_segment_sum(c, -- tf.constant([0, 1]), tf.constant([0, 0])) ==> [[0 0 0 0]] -- -- # Select two rows, two segments. tf.sparse_segment_sum(c, -- tf.constant([0, 1]), tf.constant([0, 1])) ==> [[ 1 2 3 4] [-1 -2 -3 -- -4]] -- -- # Select all rows, two segments.
tf.sparse_segment_sum(c, -- tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) ==> [[0 0 0 0] [5 6 -- 7 8]] -- -- # Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) -- ``` sparseSegmentSum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t sparseSegmentSum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t -- | Applies softmax to a batched N-D SparseTensor. -- -- The inputs represent an N-D SparseTensor with logical shape `[..., B, -- C]` (where `N >= 2`), and with indices sorted in the canonical -- lexicographic order. -- -- This op is equivalent to applying the normal `tf.nn.softmax()` to each -- innermost logical submatrix with shape `[B, C]`, but with the catch -- that *the implicitly zero elements do not participate*. Specifically, -- the algorithm is equivalent to the following: -- --
    --
  1. Applies `tf.nn.softmax()` to a densified view of each innermost -- submatrix with shape `[B, C]`, along the size-C dimension;
  2. Masks out the original implicitly-zero locations;
  3. Renormalizes the remaining elements.
-- -- Hence, the SparseTensor result has exactly the same non-zero -- indices and shape. sparseSoftmax :: (OneOf '[Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t sparseSoftmax' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t -- | Computes softmax cross entropy cost and gradients to backpropagate. -- -- Unlike SoftmaxCrossEntropyWithLogits, this operation does not -- accept a matrix of label probabilities, but rather a single label per -- row of features. This label is considered to have probability 1.0 for -- the given row. -- -- Inputs are the logits, not probabilities. sparseSoftmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t) sparseSoftmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => OpParams -> Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t) -- | Returns the element-wise max of two SparseTensors. -- -- Assumes the two SparseTensors have the same shape, i.e., no -- broadcasting. sparseSparseMaximum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) sparseSparseMaximum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) -- | Returns the element-wise min of two SparseTensors. -- -- Assumes the two SparseTensors have the same shape, i.e., no -- broadcasting. sparseSparseMinimum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) sparseSparseMinimum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) -- | Split a SparseTensor into num_split tensors along -- one dimension. -- -- If `shape[split_dim]` is not an integer multiple of -- num_split, slices `[0 : shape[split_dim] % num_split]` get -- one extra dimension. For example, if `split_dim = 1` and `num_split = -- 2` and the input is -- -- input_tensor = shape = [2, 7] [ a d e ] [b c ] -- -- Graphically the output tensors are: -- -- output_tensor[0] = shape = [2, 4] [ a ] [b c ] -- -- output_tensor[1] = shape = [2, 3] [ d e ] [ ] sparseSplit :: (TensorType t) => Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) sparseSplit' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) -- | Adds up a SparseTensor and a dense Tensor, producing a -- dense Tensor. -- -- This Op does not require a_indices to be sorted in standard -- lexicographic order.
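--
-- A minimal usage sketch (an addition, not part of the upstream op
-- documentation; it assumes constant and vector from TensorFlow.Ops
-- and runSession/run from TensorFlow.Session): add a sparse [2,2]
-- tensor with the single entry [0,0]=10 to a dense 2x2 matrix.
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.GenOps.Core as TF
-- > import qualified TensorFlow.Ops as TF (constant, vector)
-- > import qualified TensorFlow.Session as TF
-- > import qualified TensorFlow.Types as TF (Shape (..))
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >   let aIx  = TF.constant (TF.Shape [1, 2]) [0, 0 :: Int32]
-- >       aVal = TF.vector [10 :: Float]
-- >       aSh  = TF.vector [2, 2 :: Int32]
-- >       b    = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
-- >   v <- TF.run (TF.sparseTensorDenseAdd aIx aVal aSh b)
-- >   liftIO $ print (v :: V.Vector Float)  -- [11.0,2.0,3.0,4.0]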
sparseTensorDenseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t sparseTensorDenseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t -- | Multiply SparseTensor (of rank 2) A by dense matrix B. -- -- No validity checking is performed on the indices of A. However, the -- following input format is recommended for optimal behavior: -- -- if adjoint_a == false: A should be sorted in lexicographically -- increasing order. Use SparseReorder if you're not sure. if adjoint_a -- == true: A should be sorted in order of increasing dimension 1 (i.e., -- "column major" order instead of "row major" order). sparseTensorDenseMatMul :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t sparseTensorDenseMatMul' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t -- | Converts a sparse representation into a dense tensor. -- -- Builds an array dense with shape output_shape such -- that -- -- ```prettyprint # If sparse_indices is scalar dense[i] = (i == -- sparse_indices ? sparse_values : default_value) -- -- # If sparse_indices is a vector, then for each i -- dense[sparse_indices[i]] = sparse_values[i] -- -- # If sparse_indices is an n by d matrix, then for each i in [0, n) -- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = -- sparse_values[i] ``` -- -- All other values in dense are set to default_value. -- If sparse_values is a scalar, all sparse indices are set to -- this single value. -- -- Indices should be sorted in lexicographic order, and indices must not -- contain any repeats. If validate_indices is true, these -- properties are checked during execution. sparseToDense :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t sparseToDense' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -- | Applies set operation along last dimension of 2 SparseTensor -- inputs. -- -- See SetOperationOp::SetOperationFromContext for values of -- set_operation. -- -- If validate_indices is True, -- SparseToSparseSetOperation validates the order and range of -- set1 and set2 indices. -- -- Input set1 is a SparseTensor represented by -- set1_indices, set1_values, and set1_shape. -- For set1 ranked n, 1st `n-1` dimensions must be the -- same as set2. Dimension n contains values in a set, -- duplicates are allowed but ignored. -- -- Input set2 is a SparseTensor represented by -- set2_indices, set2_values, and set2_shape. -- For set2 ranked n, 1st `n-1` dimensions must be the -- same as set1. Dimension n contains values in a set, -- duplicates are allowed but ignored. -- -- If validate_indices is True, this op validates the -- order and range of set1 and set2 indices. -- -- Output result is a SparseTensor represented by -- result_indices, result_values, and -- result_shape. For set1 and set2 ranked -- n, this has rank n and the same 1st `n-1` dimensions -- as set1 and set2. 
The nth dimension -- contains the result of set_operation applied to the -- corresponding `[0...n-1]` dimension of set. sparseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) sparseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -- | Splits a tensor into num_split tensors along one dimension. split :: (TensorType t) => Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t] split' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t] -- | Splits a tensor into num_split tensors along one dimension. splitV :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t] splitV' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t] -- | Computes square root of x element-wise. -- -- I.e., \(y = \sqrt{x} = x^{1/2}\). sqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t sqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes the gradient for the sqrt of x wrt its input. -- -- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and -- dy is the corresponding input gradient. sqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t sqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes square of x element-wise. -- -- I.e., \(y = x * x = x^2\). square :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t square' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns (x - y)(x - y) element-wise. -- -- squaredDifference :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t squaredDifference' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Removes dimensions of size 1 from the shape of a tensor. -- -- Given a tensor input, this operation returns a tensor of the -- same type with all dimensions of size 1 removed. If you don't want to -- remove all size 1 dimensions, you can remove specific size 1 -- dimensions by specifying squeeze_dims. -- -- For example: -- -- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] -- shape(squeeze(t)) ==> [2, 3] ``` -- -- Or, to remove specific size 1 dimensions: -- -- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] -- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] ``` squeeze :: (TensorType t) => Tensor v'1 t -> Tensor Build t squeeze' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | A stack that produces elements in first-in last-out order.
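--
-- A minimal usage sketch of the stack/stackPush/stackPop trio (an
-- addition, not part of the upstream op documentation; it assumes
-- vector from TensorFlow.Ops, runSession/run and the Session type
-- from TensorFlow.Session, and the DT_FLOAT constructor from the
-- tensorflow-proto package). The push result is fetched first so the
-- push is guaranteed to execute before the pop:
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import qualified Data.Vector as V
-- > import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))
-- > import qualified TensorFlow.GenOps.Core as TF
-- > import qualified TensorFlow.Ops as TF (vector)
-- > import qualified TensorFlow.Session as TF
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >   s <- TF.stack DT_FLOAT
-- >   pushed <- TF.stackPush s (TF.vector [1, 2, 3 :: Float])
-- >   _ <- TF.run pushed :: TF.Session (V.Vector Float)  -- force the push
-- >   top <- TF.stackPop s
-- >   v <- TF.run top
-- >   liftIO $ print (v :: V.Vector Float)  -- [1.0,2.0,3.0]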
stack :: (MonadBuild m') => DataType -> m' (Tensor Ref ByteString) stack' :: (MonadBuild m') => OpParams -> DataType -> m' (Tensor Ref ByteString) -- | Delete the stack from its resource container. stackClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) stackClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) -- | Pop the element at the top of the stack. stackPop :: (MonadBuild m', TensorType elem_type) => Tensor Ref ByteString -> m' (Tensor Value elem_type) stackPop' :: (MonadBuild m', TensorType elem_type) => OpParams -> Tensor Ref ByteString -> m' (Tensor Value elem_type) -- | Push an element onto the stack. stackPush :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t) stackPush' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t) -- | Stage values similar to a lightweight Enqueue. The basic functionality -- of this -- -- Op is similar to a queue with many fewer capabilities and options. -- This Op is optimized for performance. stage :: (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -> m' (ControlNode) stage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -> m' (ControlNode) -- | Stops gradient computation. -- -- When executed in a graph, this op outputs its input tensor as-is. -- -- When building ops to compute gradients, this op prevents the -- contribution of its inputs to be taken into account. Normally, the -- gradient generator adds ops to a graph to compute the derivatives of a -- specified loss by recursively finding out inputs that -- contributed to its computation. If you insert this op in the graph its -- inputs are masked from the gradient generator. They are not taken into -- account for computing gradients. -- -- This is useful any time you want to compute a value with TensorFlow -- but need to pretend that the value was a constant. Some examples -- include: -- -- stopGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t stopGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Return a strided slice from input. -- -- Note, most python users will want to use the Python -- Tensor.__getitem__ or Variable.__getitem__ rather than this op directly. -- -- The goal of this op is to produce a new tensor with a subset of the -- elements from the n dimensional input tensor. The -- subset is chosen using a sequence of m sparse range -- specifications encoded into the arguments of this function. Note, in -- some cases m could be equal to n, but this need not -- be the case. Each range specification entry can be one of the -- following: -- -- Each conceptual range specification is encoded in the op's argument. -- This encoding is best understood by considering a non-trivial example. -- In particular, `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as -- -- ```prettyprint begin = [1, 2, x, x, 0, x] # x denotes don't care -- (usually 0) end = [2, 4, x, x, -3, x] strides = [1, 1, x, x, -1, 1] -- begin_mask = 1<<4 | 1 << 5 = 48 end_mask = 1<<5 = 32 -- ellipsis_mask = 1<<3 = 8 new_axis_mask = 1<<2 = 4 -- shrink_axis_mask = 1<<0 ``` -- -- In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of -- the slice becomes (2, 1, 5, 5, 2, 5). Let us walk step by step through -- each argument specification. -- --
    --
  1. The first argument in the example slice is turned into `begin = 1` -- and `end = begin + 1 = 2`. To disambiguate from the original spec -- `2:4` we also set the appropriate bit in -- shrink_axis_mask.
  2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks -- have zero bits contributed.
  3. None is a synonym for `tf.newaxis`. This means insert a dimension -- of size 1 in the final shape. Dummy values are contributed -- to begin, end and stride, while the new_axis_mask bit is set.
  4. ... grab the full ranges from as many dimensions as -- needed to fully specify a slice for every dimension of the input -- shape.
  5. `:-3:-1` shows the use of negative indices. A negative index -- i associated with a dimension that has shape s is -- converted to a positive index `s + i`. So `-1` becomes `s-1` (i.e. the -- last element). This conversion is done internally so begin, end and -- strides receive x, -3, and -1. The appropriate begin_mask bit is set -- to indicate the start range is the full range (ignoring the x).
  6. : indicates that the entire contents of the corresponding -- dimension is selected. This is equivalent to `::` or `0::1`. begin, -- end, and strides receive 0, 0, and 1, respectively. The appropriate -- bits in begin_mask and end_mask are also set.
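--
-- A minimal usage sketch (an addition, not part of the upstream op
-- documentation; it assumes constant and vector from TensorFlow.Ops
-- and runSession/run from TensorFlow.Session): take rows 1..2 and
-- columns 0..1 of a 3x3 matrix, i.e. the equivalent of foo[1:3, 0:2].
-- The mask attributes default to 0 and can be supplied through the
-- OpParams argument of stridedSlice'.
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.GenOps.Core as TF
-- > import qualified TensorFlow.Ops as TF (constant, vector)
-- > import qualified TensorFlow.Session as TF
-- > import qualified TensorFlow.Types as TF (Shape (..))
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >   let x       = TF.constant (TF.Shape [3, 3]) [1 .. 9 :: Float]
-- >       begin   = TF.vector [1, 0 :: Int32]
-- >       end     = TF.vector [3, 2 :: Int32]
-- >       strides = TF.vector [1, 1 :: Int32]
-- >   v <- TF.run (TF.stridedSlice x begin end strides)
-- >   liftIO $ print (v :: V.Vector Float)  -- [4.0,5.0,7.0,8.0]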
-- -- stridedSlice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t stridedSlice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t -- | Assign value to the sliced l-value reference of ref. -- -- The values of value are assigned to the positions in the -- variable ref that are selected by the slice parameters. The -- slice parameters begin, end, strides, etc. work -- exactly as in StridedSlice. -- -- NOTE this op currently does not support broadcasting and so -- value's shape must be exactly the shape produced by the slice -- of ref. stridedSliceAssign :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t) stridedSliceAssign' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t) -- | Returns the gradient of StridedSlice. -- -- Since StridedSlice cuts out pieces of its input -- which is of size shape, its gradient will have the same shape -- (which is passed here as shape). The gradient will be zero in -- any element that the slice does not select. -- -- Arguments are the same as StridedSlice with the exception that -- dy is the input gradient to be propagated and shape is -- the shape of StridedSlice's input. stridedSliceGrad :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t stridedSliceGrad' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t -- | Joins the strings in the given list of string tensors into one tensor, -- -- with the given separator (default is an empty separator). stringJoin :: [Tensor v'1 ByteString] -> Tensor Build ByteString stringJoin' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString -- | Split elements of input based on delimiter into a -- SparseTensor. -- -- Let N be the size of source (typically N will be the batch size). Split -- each element of input based on delimiter and -- return a SparseTensor containing the split tokens. Empty -- tokens are ignored. -- -- delimiter can be empty, or a string of split characters. If -- delimiter is an empty string, each element of input -- is split into individual single-byte character strings, including -- splitting of UTF-8 multibyte sequences. Otherwise every character of -- delimiter is a potential split point. -- -- For example: N = 2, input[0] is 'hello world' and input[1] is 'a b c', -- then the output will be -- -- indices = [0, 0; 0, 1; 1, 0; 1, 1; 1, 2] shape = [2, 3] values = -- [hello, world, a, b, c] stringSplit :: Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) stringSplit' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) -- | Converts each string in the input Tensor to its hash mod by a number -- of buckets. -- -- The hash function is deterministic on the content of the string within -- the process. -- -- Note that the hash function may change from time to time.
This -- functionality will be deprecated and it's recommended to use -- `tf.string_to_hash_bucket_fast()` or -- `tf.string_to_hash_bucket_strong()`. stringToHashBucket :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 stringToHashBucket' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -- | Converts each string in the input Tensor to its hash mod by a number -- of buckets. -- -- The hash function is deterministic on the content of the string within -- the process and will never change. However, it is not suitable for -- cryptography. This function may be used when CPU time is scarce and -- inputs are trusted or unimportant. There is a risk of adversaries -- constructing inputs that all hash to the same bucket. To prevent this -- problem, use a strong hash function with -- `tf.string_to_hash_bucket_strong`. stringToHashBucketFast :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 stringToHashBucketFast' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -- | Converts each string in the input Tensor to its hash mod by a number -- of buckets. -- -- The hash function is deterministic on the content of the string within -- the process. The hash function is a keyed hash function, where -- attribute key defines the key of the hash function. -- key is an array of 2 elements. -- -- A strong hash is important when inputs may be malicious, e.g. URLs -- with additional components. Adversaries could try to make their inputs -- hash to the same bucket for a denial-of-service attack or to skew the -- results. A strong hash prevents this by making it difficult, if not -- infeasible, to compute inputs that hash to the same bucket. This comes -- at a cost of roughly 4x higher compute time than -- `tf.string_to_hash_bucket_fast`. stringToHashBucketStrong :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 stringToHashBucketStrong' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -- | Converts each string in the input Tensor to the specified numeric -- type. -- -- (Note that int32 overflow results in an error while float overflow -- results in a rounded value.) stringToNumber :: (OneOf '[Int32, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type stringToNumber' :: (OneOf '[Int32, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type -- | Returns x - y element-wise. -- -- sub :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t sub' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Return substrings from Tensor of strings. -- -- For each string in the input Tensor, creates a substring -- starting at index pos with a total length of len. -- -- If len defines a substring that would extend beyond the -- length of the input string, then as many characters as possible are -- used. -- -- If pos is negative or specifies a character index larger than -- any of the input strings, then an InvalidArgumentError is -- thrown. -- -- pos and len must have the same shape, otherwise a -- ValueError is thrown on Op creation.
-- -- Examples -- -- Using scalar pos and len: -- -- ``` input = [b'Hello', b'World'] position = 1 length = 3 -- -- output = [b'ell', b'orl'] ``` -- -- Using pos and len with same shape as input: -- -- ``` input = [[b'ten', b'eleven', b'twelve'], -- [b'thirteen', b'fourteen', b'fifteen'], -- [b'sixteen', b'seventeen', b'eighteen']] position -- = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] length = [[2, 3, 4], [4, 3, 2], -- [5, 5, 5]] -- -- output = [[b'en', b'eve', b'lve'], -- [b'hirt', b'urt', b'te'], [b'ixtee', -- b'vente', b'hteen']] ``` -- -- Broadcasting pos and len onto input: -- -- ``` input = [[b'ten', b'eleven', b'twelve'], -- [b'thirteen', b'fourteen', b'fifteen'], -- [b'sixteen', b'seventeen', b'eighteen'], -- [b'nineteen', b'twenty', b'twentyone']] position -- = [1, 2, 3] length = [1, 2, 3] -- -- output = [[b'e', b'ev', b'lve'], [b'h', -- b'ur', b'tee'], [b'i', b've', -- b'hte'], [b'i', b'en', b'nty']] ``` -- -- Broadcasting input onto pos and len: -- -- ``` input = b'thirteen' position = [1, 5, 7] length = [3, 2, 1] -- -- output = [b'hir', b'ee', b'n'] ``` substr :: (OneOf '[Int32, Int64] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString substr' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString -- | Computes the sum of elements across dimensions of a tensor. -- -- Reduces input along the dimensions given in -- reduction_indices. Unless keep_dims is true, the -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. sum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t sum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | Computes the singular value decompositions of one or more matrices. -- -- Computes the SVD of each inner matrix in input such that -- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * -- transpose(v[..., :, :])` -- -- ```prettyprint # a is a tensor containing a batch of matrices. # s is -- a tensor of singular values for each matrix. # u is the tensor -- containing the left singular vectors for each matrix. # v is the tensor -- containing the right singular vectors for each matrix. s, u, v = svd(a) -- s, _, _ = svd(a, compute_uv=False) ``` svd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) svd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) -- | Forwards `data` to the output port determined by pred. -- -- If pred is true, the `data` input is forwarded to -- output_true. Otherwise, the data goes to -- output_false. -- -- See also RefSwitch and Merge. switch :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t) switch' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t) -- | A Reader that outputs the records from a TensorFlow Records file. tFRecordReader :: (MonadBuild m') => m' (Tensor Ref ByteString) tFRecordReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) -- | A Reader that outputs the records from a TensorFlow Records file.
tFRecordReaderV2 :: (MonadBuild m') => m' (ResourceHandle) tFRecordReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) -- | Read SparseTensors from a SparseTensorsMap and -- concatenate them. -- -- The input sparse_handles must be an int64 matrix of -- shape `[N, 1]` where N is the minibatch size and the rows -- correspond to the output handles of AddSparseToTensorsMap or -- AddManySparseToTensorsMap. The ranks of the original -- SparseTensor objects that went into the given input ops must -- all match. When the final SparseTensor is created, it has -- rank one higher than the ranks of the incoming SparseTensor -- objects (they have been concatenated along a new row dimension on the -- left). -- -- The output SparseTensor object's shape values for all -- dimensions but the first are the max across the input -- SparseTensor objects' shape values for the corresponding -- dimensions. Its first shape value is N, the minibatch size. -- -- The input SparseTensor objects' indices are assumed ordered -- in standard lexicographic order. If this is not the case, after this -- step run SparseReorder to restore index ordering. -- -- For example, if the handles represent an input, which is a `[2, 3]` -- matrix representing two original SparseTensor objects: -- -- ``` index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] ``` -- -- and -- -- ``` index = [ 2] [10] values = [4, 5] shape = [30] ``` -- -- then the final SparseTensor will be: -- -- ``` index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] -- shape = [2 50] ``` takeManySparseFromTensorsMap :: (MonadBuild m', TensorType dtype) => Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) takeManySparseFromTensorsMap' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) -- | Computes tan of x element-wise. tan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t tan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes hyperbolic tangent of x element-wise. tanh :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t tanh' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes the gradient for the tanh of x wrt its input. -- -- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and -- dy is the corresponding input gradient. tanhGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t tanhGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Returns a tensor that may be mutated, but only persists within a -- single step. -- -- This is an experimental op for internal use only and it is possible to -- use this op in unsafe ways. DO NOT USE unless you fully understand the -- risks. -- -- It is the caller's responsibility to ensure that ref is -- eventually passed to a matching DestroyTemporaryVariable op -- after all other uses have completed. -- -- Outputs a ref to the tensor state so it may be read or modified. -- -- E.g. 
var = state_ops._temporary_variable([1, 2], types.float_) -- var_name = var.op.name var = state_ops.assign(var, [[4.0, 5.0]]) var = -- state_ops.assign_add(var, [[6.0, 7.0]]) final = -- state_ops._destroy_temporary_variable(var, var_name=var_name) temporaryVariable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) temporaryVariable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) tensorArray :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString) tensorArray' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString) tensorArrayClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) tensorArrayClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) -- | Deprecated. Use TensorArrayCloseV3 tensorArrayCloseV2 :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode) tensorArrayCloseV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode) -- | Delete the TensorArray from its resource container. This enables -- -- the user to close and release the resource in the middle of a -- step/run. tensorArrayCloseV3 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) tensorArrayCloseV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) tensorArrayConcat :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) tensorArrayConcat' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) -- | Deprecated. Use TensorArrayConcatV3 tensorArrayConcatV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64) tensorArrayConcatV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64) -- | Concat the elements from the TensorArray into value value. -- -- Takes T elements of shapes -- -- ``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 -- x ...) ``` -- -- and concatenates them into a Tensor of shape: -- -- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` -- -- All elements must have the same shape (excepting the first dimension). tensorArrayConcatV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) tensorArrayConcatV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) tensorArrayGather :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) tensorArrayGather' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -- | Deprecated. Use TensorArrayGatherV3 tensorArrayGatherV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype tensorArrayGatherV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype -- | Gather specific elements from the TensorArray into output -- value. -- -- All elements selected by indices must have the same shape. 
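--
-- A minimal usage sketch (an addition, not part of the upstream op
-- documentation; it assumes scalar and vector from TensorFlow.Ops,
-- runSession/run from TensorFlow.Session, DT_FLOAT from the
-- tensorflow-proto package, and the tensorArrayWriteV3 op documented
-- further below): write two rows, threading the flow scalar so the
-- writes execute before the gather.
--
-- > import Control.Monad.IO.Class (liftIO)
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))
-- > import qualified TensorFlow.GenOps.Core as TF
-- > import qualified TensorFlow.Ops as TF (scalar, vector)
-- > import qualified TensorFlow.Session as TF
-- >
-- > main :: IO ()
-- > main = TF.runSession $ do
-- >   (ta, flow0) <- TF.tensorArrayV3 DT_FLOAT (TF.scalar (2 :: Int32))
-- >   flow1 <- TF.tensorArrayWriteV3 ta (TF.scalar 0) (TF.vector [1, 2 :: Float]) flow0
-- >   flow2 <- TF.tensorArrayWriteV3 ta (TF.scalar 1) (TF.vector [3, 4 :: Float]) flow1
-- >   out <- TF.tensorArrayGatherV3 ta (TF.vector [0, 1 :: Int32]) flow2
-- >   v <- TF.run out
-- >   liftIO $ print (v :: V.Vector Float)  -- [1.0,2.0,3.0,4.0]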
tensorArrayGatherV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) tensorArrayGatherV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) tensorArrayGrad :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString) tensorArrayGrad' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString) -- | Deprecated. Use TensorArrayGradV3 tensorArrayGradV2 :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString) tensorArrayGradV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString) -- | Creates a TensorArray for storing the gradients of values in the given -- handle. -- -- If the given TensorArray gradient already exists, returns a reference -- to it. -- -- Locks the size of the original TensorArray by disabling its dynamic -- size flag. -- -- -- -- The handle flow_in forces the execution of the gradient lookup to -- occur only after certain other operations have occurred. For example, -- when the forward TensorArray is dynamically sized, writes to this -- TensorArray may resize the object. The gradient TensorArray is -- statically sized based on the size of the forward TensorArray when -- this operation executes. Furthermore, the size of the forward -- TensorArray is frozen by this call. As a result, the flow is used to -- ensure that the call to generate the gradient TensorArray only happens -- after all writes are executed. -- -- In the case of dynamically sized TensorArrays, gradient computation -- should only be performed on read operations that have themselves been -- chained via flow to occur only after all writes have executed. That -- way the final size of the forward TensorArray is known when this -- operation is called. -- -- -- -- TensorArray gradient calls use an accumulator TensorArray object. If -- multiple gradients are calculated and run in the same session, the -- multiple gradient nodes may accidentally flow through the same -- accumulator TensorArray. This double counts and generally breaks the -- TensorArray gradient flow. -- -- The solution is to identify which gradient call this particular -- TensorArray gradient is being called in. This is performed by -- identifying a unique string (e.g. "gradients", "gradients_1", ...) -- from the input gradient Tensor's name. This string is used as a suffix -- when creating the TensorArray gradient object here (the attribute -- source). -- -- The attribute source is added as a suffix to the forward -- TensorArray's name when performing the creation / lookup, so that each -- separate gradient calculation gets its own TensorArray accumulator.
tensorArrayGradV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float)) tensorArrayGradV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float)) tensorArrayPack :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype) tensorArrayPack' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype) tensorArrayRead :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) tensorArrayRead' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) -- | Deprecated. Use TensorArrayReadV3 tensorArrayReadV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype tensorArrayReadV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype -- | Read an element from the TensorArray into output value. tensorArrayReadV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) tensorArrayReadV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) tensorArrayScatter :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArrayScatter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -- | Deprecated. Use TensorArrayScatterV3 tensorArrayScatterV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float tensorArrayScatterV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float -- | Scatter the data from the input value into specific TensorArray -- elements. -- -- indices must be a vector; its length must match the first dim -- of value. tensorArrayScatterV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArrayScatterV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArraySize :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32) tensorArraySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32) -- | Deprecated. Use TensorArraySizeV3 tensorArraySizeV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32 tensorArraySizeV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32 -- | Get the current size of the TensorArray.
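--
-- A small sketch (assumed module paths: scalar from TensorFlow.Ops,
-- DT_FLOAT from the tensorflow-proto bindings; the name sizeOf is
-- illustrative): the size of a freshly created TensorArray is simply
-- the size it was constructed with:
--
-- ```
-- import Data.Int (Int32)
-- import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))
-- import TensorFlow.Build (Build)
-- import TensorFlow.Ops (scalar)
-- import TensorFlow.Tensor (Tensor, Value)
--
-- sizeOf :: Build (Tensor Value Int32)
-- sizeOf = do
--   (ta, flow) <- tensorArrayV3 DT_FLOAT (scalar (5 :: Int32))
--   tensorArraySizeV3 ta flow  -- a scalar tensor holding 5
-- ```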
tensorArraySizeV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32) tensorArraySizeV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32) tensorArraySplit :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArraySplit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) -- | Deprecated. Use TensorArraySplitV3 tensorArraySplitV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float tensorArraySplitV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float -- | Split the data from the input value into TensorArray elements. -- -- Assuming that lengths takes on values -- -- ```(n0, n1, ..., n(T-1))``` -- -- and that value has shape -- -- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, -- -- this splits values into a TensorArray with T tensors. -- -- TensorArray index t will be the subtensor of values with starting -- position -- -- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)``` -- -- and having size -- -- ```nt x d0 x d1 x ...``` tensorArraySplitV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArraySplitV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArrayUnpack :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float) tensorArrayUnpack' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float) -- | Deprecated. Use TensorArrayV3 tensorArrayV2 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString) tensorArrayV2' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString) -- | An array of Tensors of given size, with data written via Write and -- read via Read or Pack. tensorArrayV3 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float)) tensorArrayV3' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float)) tensorArrayWrite :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArrayWrite' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -- | Deprecated. Use TensorArrayWriteV3 tensorArrayWriteV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float tensorArrayWriteV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float -- | Push an element onto the tensor_array.
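--
-- A minimal write/read round trip as a sketch (assumed module paths:
-- scalar and vector from TensorFlow.Ops, DT_FLOAT from the
-- tensorflow-proto bindings; the flow value is threaded to order the
-- operations):
--
-- ```
-- import Data.Int (Int32)
-- import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))
-- import TensorFlow.Build (Build)
-- import TensorFlow.Ops (scalar, vector)
-- import TensorFlow.Tensor (Tensor, Value)
--
-- writeThenRead :: Build (Tensor Value Float)
-- writeThenRead = do
--   (ta, flow0) <- tensorArrayV3 DT_FLOAT (scalar (1 :: Int32))
--   flow1 <- tensorArrayWriteV3 ta (scalar (0 :: Int32))
--              (vector [1, 2, 3 :: Float]) flow0  -- write index 0
--   tensorArrayReadV3 ta (scalar (0 :: Int32)) flow1  -- read it back
-- ```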
tensorArrayWriteV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) tensorArrayWriteV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) -- | Outputs a Summary protocol buffer with a tensor. tensorSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString tensorSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString -- | A Reader that outputs the lines of a file delimited by '\n'. textLineReader :: (MonadBuild m') => m' (Tensor Ref ByteString) textLineReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) -- | A Reader that outputs the lines of a file delimited by '\n'. textLineReaderV2 :: (MonadBuild m') => m' (ResourceHandle) textLineReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) -- | Generates labels for candidate sampling with a learned unigram -- distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate -- labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage -- is that the sampled candidates must be chosen independently of the -- context and of the true labels. threadUnsafeUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) threadUnsafeUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -- | Constructs a tensor by tiling a given tensor. -- -- This operation creates a new tensor by replicating input -- multiples times. The output tensor's i'th dimension has -- `input.dims(i) * multiples[i]` elements, and the values of -- input are replicated `multiples[i]` times along the -- ith dimension. For example, tiling `[a b c d]` by `[2]` -- produces `[a b c d a b c d]`. tile :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t tile' :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => OpParams -> Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t -- | Returns the gradient of Tile. -- -- Since Tile takes an input and repeats the input -- multiples times along each dimension, TileGrad takes -- in multiples and aggregates each repeated tile of -- input into output. tileGrad :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t tileGrad' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t -- | Finds values and indices of the k largest elements for the -- last dimension. -- -- If the input is a vector (rank-1), finds the k largest -- entries in the vector and outputs their values and indices as vectors. -- Thus `values[j]` is the j-th largest entry in input, -- and its index is `indices[j]`. -- -- For matrices (resp. higher rank input), computes the top k -- entries in each row (resp. vector along the last dimension). Thus, -- -- values.shape = indices.shape = input.shape[:-1] + [k] -- -- If two elements are equal, the lower-index element appears first. -- -- If k varies dynamically, use TopKV2 below. 
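--
-- For example, with `k = 2`:
--
-- ```prettyprint
-- # input is [1, 4, 3, 2]
-- values, indices = top_k(input, 2)
-- values ==> [4, 3]
-- indices ==> [1, 2]
-- ```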
topK :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32) topK' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32) -- | Finds values and indices of the k largest elements for the -- last dimension. -- -- If the input is a vector (rank-1), finds the k largest -- entries in the vector and outputs their values and indices as vectors. -- Thus `values[j]` is the j-th largest entry in input, -- and its index is `indices[j]`. -- -- For matrices (resp. higher rank input), computes the top k -- entries in each row (resp. vector along the last dimension). Thus, -- -- values.shape = indices.shape = input.shape[:-1] + [k] -- -- If two elements are equal, the lower-index element appears first. topKV2 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32) topKV2' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32) -- | Shuffle dimensions of x according to a permutation. -- -- The output y has the same rank as x. The shapes of -- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for -- i in [0, 1, ..., rank(x) - 1]` transpose :: (TensorType t, OneOf '[Int32, Int64] tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t transpose' :: (TensorType t, OneOf '[Int32, Int64] tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t -- | Returns x / y element-wise for integer types. -- -- Truncation designates that negative numbers will round fractional -- quantities toward zero. I.e. -7 / 5 = -1. This matches C semantics but -- is different from Python semantics. See FloorDiv for a -- division function that matches Python semantics. truncateDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t truncateDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Returns element-wise remainder of division. This emulates C semantics -- in that the result here is consistent with a truncating divide, i.e. -- `truncate(x / y) * y + truncate_mod(x, y) = x`. E.g. -- `truncate_mod(-7, 5) = -2`, since `truncate(-7 / 5) * 5 + (-2) = -7`. truncateMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t truncateMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Outputs random values from a truncated normal distribution. -- -- The generated values follow a normal distribution with mean 0 and -- standard deviation 1, except that values whose magnitude is more than -- 2 standard deviations from the mean are dropped and re-picked. truncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype) truncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype) -- | Generates labels for candidate sampling with a uniform distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling.
-- -- For each batch, this op picks a single set of sampled candidate -- labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage -- is that the sampled candidates must be chosen independently of the -- context and of the true labels. uniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) uniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) -- | Finds unique elements in a 1-D tensor. -- -- This operation returns a tensor y containing all of the -- unique elements of x sorted in the same order that they occur -- in x. This operation also returns a tensor idx the -- same size as x that contains the index of each value of -- x in the unique output y. In other words: -- -- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` -- -- For example: -- -- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, -- idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, -- 3, 4, 4] ``` unique :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx) unique' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx) -- | Finds unique elements in a 1-D tensor. -- -- This operation returns a tensor y containing all of the -- unique elements of x sorted in the same order that they occur -- in x. This operation also returns a tensor idx the -- same size as x that contains the index of each value of -- x in the unique output y. Finally, it returns a -- third tensor count that contains the count of each element of -- y in x. In other words: -- -- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` -- -- For example: -- -- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, -- idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==> -- [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] ``` uniqueWithCounts :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) uniqueWithCounts' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -- | Unpacks a given dimension of a rank-R tensor into -- num rank-`(R-1)` tensors. -- -- Unpacks num tensors from value by chipping it along -- the axis dimension. For example, given a tensor of shape `(A, -- B, C, D)`; -- -- If `axis == 0` then the i'th tensor in output is the slice -- `value[i, :, :, :]` and each tensor in output will have shape -- `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike -- split). -- -- If `axis == 1` then the i'th tensor in output is the slice -- `value[:, i, :, :]` and each tensor in output will have shape -- `(A, C, D)`. Etc. -- -- This is the opposite of pack. unpack :: (TensorType t) => Int64 -> Tensor v'1 t -> [Tensor Build t] unpack' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> [Tensor Build t] -- | Computes the sum along segments of a tensor. -- -- Read the section on Segmentation for an explanation of -- segments. -- -- Computes a tensor such that `output[i] = sum_{j...} data[j...]` where -- the sum is over tuples `j...` such that `segment_ids[j...] == i`.
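--
-- For example:
--
-- ```prettyprint
-- # c is [[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]]
-- unsorted_segment_sum(c, [0, 1, 0], 2)
-- ==> [[5, 5, 5, 5], [5, 6, 7, 8]]
-- ```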
-- Unlike SegmentSum, segment_ids need not be sorted -- and need not cover all values in the full range of valid values. -- -- If the sum is empty for a given segment ID i, `output[i] = -- 0`. -- -- num_segments should equal the number of distinct segment IDs. -- -- (See the figure images/UnsortedSegmentSum.png in the TensorFlow -- documentation.) unsortedSegmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t unsortedSegmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t -- | Op is similar to a lightweight Dequeue. The basic functionality is -- similar to dequeue, with many fewer capabilities and options. This Op -- is optimized for performance. unstage :: (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes) unstage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes) -- | Creates a handle to a Variable resource. varHandleOp :: (MonadBuild m') => DataType -> Shape -> m' (ResourceHandle) varHandleOp' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (ResourceHandle) -- | Checks whether a resource handle-based variable has been initialized. varIsInitializedOp :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Bool) varIsInitializedOp' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Bool) -- | Use VariableV2 instead. variable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) variable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) -- | Holds state in the form of a tensor that persists across steps. -- -- Outputs a ref to the tensor state so it may be read or modified. -- TODO(zhifengc/mrry): Add a pointer to a more detailed document about -- sharing states in tensorflow. variableV2 :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) variableV2' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) -- | Returns locations of true values in a boolean tensor. -- -- This operation returns the coordinates of true elements in -- input. The coordinates are returned in a 2-D tensor where the -- first dimension (rows) represents the number of true elements, and the -- second dimension (columns) represents the coordinates of the true -- elements. Keep in mind, the shape of the output tensor can vary -- depending on how many true values there are in input. Indices -- are output in row-major order. -- -- For example: -- -- ```prettyprint # input tensor is [[True, False] # [True, -- False]] # input has two true values, so output has two -- coordinates. # input has rank of 2, so coordinates have two -- indices. where(input) ==> [[0, 0], [1, 0]] -- -- # input tensor is [[[True, False] # [True, False]] # [[False, -- True] # [False, True]] # [[False, False] # [False, True]]] # -- input has 5 true values, so output has 5 coordinates. # -- input has rank of 3, so coordinates have three indices.
-- where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, -- 1, 1]] ``` where' :: Tensor v'1 Bool -> Tensor Build Int64 where'' :: OpParams -> Tensor v'1 Bool -> Tensor Build Int64 -- | A Reader that outputs the entire contents of a file as a value. -- -- To use, enqueue filenames in a Queue. The output of ReaderRead will be -- a filename (key) and the contents of that file (value). wholeFileReader :: (MonadBuild m') => m' (Tensor Ref ByteString) wholeFileReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) -- | A Reader that outputs the entire contents of a file as a value. -- -- To use, enqueue filenames in a Queue. The output of ReaderRead will be -- a filename (key) and the contents of that file (value). wholeFileReaderV2 :: (MonadBuild m') => m' (ResourceHandle) wholeFileReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) -- | Writes contents to the file at input filename. Creates file if not -- existing. writeFile :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) writeFile' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) -- | Returns a tensor of zeros with the same shape and type as x. zerosLike :: (TensorType t) => Tensor v'1 t -> Tensor Build t zerosLike' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Compute the Hurwitz zeta function \(\zeta(x, q)\). -- -- The Hurwitz zeta function is defined as: -- -- \(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\) -- -- For example, \(\zeta(2, 1) = \pi^2/6\). zeta :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t zeta' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | A graph node which represents an argument to a function. _Arg :: (MonadBuild m', TensorType t) => Int64 -> m' (Tensor Value t) _Arg' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> m' (Tensor Value t) -- | Converts an array of tensors to a list of tensors. _ArrayToList :: (TensorType t, TensorTypes out_types) => [Tensor v'1 t] -> TensorList (Build) out_types _ArrayToList' :: (TensorType t, TensorTypes out_types) => OpParams -> [Tensor v'1 t] -> TensorList (Build) out_types -- | Cast x of type SrcT to y of DstT. -- -- _HostCast requires its input and produces its output in host memory. _HostCast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT _HostCast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT -- | Receives the named tensor from send_device on recv_device. -- -- _HostRecv produces its output on host memory whereas _Recv produces -- its output on device memory. _HostRecv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type) _HostRecv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type) -- | Sends the named tensor from send_device to recv_device. -- -- _HostSend requires its input on host memory whereas _Send requires its -- input on device memory. _HostSend :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) _HostSend' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) -- | Converts a list of tensors to an array of tensors.
_ListToArray :: (TensorTypes tin, TensorType t) => Int64 -> TensorList (v'1) tin -> [Tensor Build t] _ListToArray' :: (TensorTypes tin, TensorType t) => OpParams -> Int64 -> TensorList (v'1) tin -> [Tensor Build t] -- | Creates an empty Tensor with shape shape and type -- dtype. -- -- The memory can optionally be initialized. This is usually useful in -- conjunction with inplace operations. _ParallelConcatStart :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Value dtype) _ParallelConcatStart' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Value dtype) -- | Updates input value at loc with update. -- -- If you use this function you will almost certainly want to add a -- control dependency, as done in the implementation of parallel_stack, -- to avoid race conditions (see the sketch at the end of this listing). _ParallelConcatUpdate :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t _ParallelConcatUpdate' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Receives the named tensor from send_device on recv_device. _Recv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type) _Recv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type) -- | A graph node which represents a return value of a function. _Retval :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) _Retval' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) -- | Sends the named tensor from send_device to recv_device. _Send :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) _Send' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode)
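--
-- To illustrate the control-dependency pattern mentioned for
-- _ParallelConcatUpdate, here is a hedged sketch (assumptions:
-- withControlDependencies from TensorFlow.ControlFlow, constant from
-- TensorFlow.Ops, and render from the tensorflow package, whose exact
-- module may vary by version; shapes and names are illustrative):
--
-- ```
-- import TensorFlow.Build (Build, render)
-- import TensorFlow.ControlFlow (withControlDependencies)
-- import TensorFlow.Ops (constant)
-- import TensorFlow.Tensor (Tensor, Value)
-- import TensorFlow.Types (Shape (..))
--
-- -- Start an uninitialized [2, 3] tensor, then force the update to run
-- -- only after the start op, so the two cannot race.
-- updateRow0 :: Build (Tensor Value Float)
-- updateRow0 = do
--   acc <- _ParallelConcatStart (Shape [2, 3])
--   withControlDependencies acc $
--     render (_ParallelConcatUpdate 0 acc
--               (constant (Shape [1, 3]) [1, 2, 3]))
-- ```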