| Safe Haskell | None |
|---|---|
| Language | Haskell2010 |
TensorFlow.Ops
Description
This module contains definitions for some built-in TensorFlow operations.
Note that certain "stateful" ops like `variable` and `assign` return a
`Build` action (e.g., `Build (Tensor Ref a)`) instead of a pure value; the
returned `Tensor`s are always rendered in the current `Build` context. This
approach helps us avoid problems with inlining or common subexpression
elimination, by writing
    do
        v <- variable []
        w <- assign v 3
        render $ w * w
instead of
    let v = variable []
        w = assign v 3
    in w * w
since the latter could reasonably be transformed by the compiler into (or vice versa)
    let v = variable []
        w = assign v 3
        w' = assign v 3
    in w * w'
Ops should return a `Build` action if their original `OpDef` marks them as
stateful, or if they take any `Ref`s as input. (This mirrors the rules that
TensorFlow uses to avoid common subexpression elimination.)
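A slightly fuller sketch of this pattern with explicit types. The module names for `Build`, `Tensor`, `Value`, and `render` are assumptions about this package's layout, and `OverloadedLists` is assumed so the `Shape` argument can be written as a list literal, as in the example above:

```haskell
{-# LANGUAGE OverloadedLists #-}
import qualified TensorFlow.Ops as TF
import TensorFlow.Build (Build, render)   -- assumed module for Build/render
import TensorFlow.Tensor (Tensor, Value)  -- assumed module for Tensor/Value

-- Builds the graph for w * w, where w is the value assigned to a freshly
-- created scalar variable; everything stays inside the Build monad.
squaredVar :: Build (Tensor Value Float)
squaredVar = do
  v <- TF.variable []     -- stateful op: Build (Tensor Ref Float)
  w <- TF.assign v 3      -- stateful op: Build (Tensor Ref Float)
  render (TF.mul w w)     -- rendered in the current Build context
```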
- add :: (TensorType t, OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- abs :: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
- addN :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => [Tensor v1 t] -> Tensor Value t
- argMax :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value Int64
- assign :: TensorType t => Tensor Ref t -> Tensor v2 t -> Build (Tensor Ref t)
- broadcastGradientArgs :: (TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t)
- cast :: (TensorType srcT, TensorType dstT) => Tensor v1 srcT -> Tensor Value dstT
- concat :: TensorType t => Tensor v1 Int32 -> [Tensor v2 t] -> Tensor Value t
- constant :: forall a. TensorType a => Shape -> [a] -> Tensor Value a
- equal :: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool
- expandDims :: TensorType t => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t
- initializedVariable :: forall a. TensorType a => Tensor Value a -> Build (Tensor Ref a)
- zeroInitializedVariable :: (TensorType a, Num a) => Shape -> Build (Tensor Ref a)
- fill :: TensorType t => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t
- oneHot :: (TensorType t, TensorType tI, OneOf '[Int32, Int64, Word8] tI) => Tensor v1 tI -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 t -> Tensor Value t
- matMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- matTranspose :: forall a v. TensorType a => Tensor v a -> Tensor Value a
- mean :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t
- mul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- neg :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
- pack :: TensorType t => [Tensor v1 t] -> Tensor Value t
- placeholder :: forall a. TensorType a => Shape -> Build (Tensor Value a)
- range :: (TensorType tidx, OneOf '[Int32, Int64, Double, Float] tidx) => Tensor v1 tidx -> Tensor v2 tidx -> Tensor v3 tidx -> Tensor Value tidx
- reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32
- relu :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t
- reluGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- reshape :: (TensorType t, TensorType tshape, OneOf '[Int32, Int64] tshape) => Tensor v1 t -> Tensor v2 tshape -> Tensor Value t
- restore :: forall a. TensorType a => ByteString -> Tensor Ref a -> Build ControlNode
- restoreFromName :: forall a. TensorType a => ByteString -> ByteString -> Tensor Ref a -> Build ControlNode
- save :: forall a v. TensorType a => ByteString -> [Tensor v a] -> Build ControlNode
- scalar :: forall a. TensorType a => a -> Tensor Value a
- shape :: TensorType t => Tensor v1 t -> Tensor Value Int32
- sign :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
- size :: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) => Tensor v1 t -> Tensor Value out_type
- softmax :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
- softmaxCrossEntropyWithLogits :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t)
- sparseToDense :: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tindices -> Tensor v2 tindices -> Tensor v3 t -> Tensor v4 t -> Tensor Value t
- sub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- sum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t
- transpose :: (TensorType t, TensorType tperm, OneOf '[Int32, Int64] tperm) => Tensor v1 t -> Tensor v2 tperm -> Tensor Value t
- truncatedNormal :: forall a v. TensorType a => Tensor v Int64 -> Build (Tensor Value a)
- variable :: TensorType dtype => Shape -> Build (Tensor Ref dtype)
- vector :: TensorType a => [a] -> Tensor Value a
- zeros :: forall a. (Num a, TensorType a) => Shape -> Tensor Value a
- zerosLike :: TensorType t => Tensor v1 t -> Tensor Value t
- scalarize :: TensorType a => Tensor v a -> Tensor Value a
Documentation
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) | |
=> Tensor v1 t | x |
-> Tensor v2 t | y |
-> Tensor Value t | z |
Returns x + y element-wise.
*NOTE*: `Add` supports broadcasting; `AddN` does not. More about broadcasting here.
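For instance, a minimal sketch using the constant helpers documented below (assuming the qualified import shown; the scalar is broadcast against the vector):

```haskell
import qualified TensorFlow.Ops as TF

-- Element-wise, broadcasting addition: [1, 2, 3] + 10 ==> [11, 12, 13]
broadcastAdd = TF.add (TF.vector [1, 2, 3 :: Float]) (TF.scalar 10)
```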
Arguments
:: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) | |
=> Tensor v1 t | x |
-> Tensor Value t | y |
Computes the absolute value of a tensor.
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \(y = |x|\).
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) | |
=> [Tensor v1 t] | inputs: Must all be the same size and shape. |
-> Tensor Value t | sum |
Add all input tensors element wise.
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) | |
=> Tensor v1 t | input |
-> Tensor v2 tidx | dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0. |
-> Tensor Value Int64 | output |
Returns the index with the largest value across dimensions of a tensor.
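A hedged usage sketch (assuming the qualified import shown and `Data.Int` for the index type):

```haskell
import Data.Int (Int32)
import qualified TensorFlow.Ops as TF

-- Index of the largest entry of a vector, reducing across dimension 0:
-- argMax([1, 5, 9], 0) ==> 2
top = TF.argMax (TF.vector [1, 5, 9 :: Float]) (TF.scalar (0 :: Int32))
```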
Arguments
:: TensorType t | |
=> Tensor Ref t | ref: Should be from a `Variable` node. |
-> Tensor v2 t | value: The value to be assigned to the variable. |
-> Build (Tensor Ref t) | output_ref: Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been reset. |
Update `ref` by assigning `value` to it.
This operation outputs "ref" after the assignment is done. This makes it easier to chain operations that need to use the reset value.
Arguments
:: (TensorType t, OneOf '[Int32, Int64] t) | |
=> Tensor v1 t | s0 |
-> Tensor v2 t | s1 |
-> (Tensor Value t, Tensor Value t) | (r0, r1) |
Return the reduction indices for computing gradients of s0 op s1 with broadcast.
This is typically used by gradient computations for a broadcasting operation.
Arguments
:: (TensorType srcT, TensorType dstT) | |
=> Tensor v1 srcT | x |
-> Tensor Value dstT | y |
Cast x of type SrcT to y of DstT.
Arguments
:: TensorType t | |
=> Tensor v1 Int32 | concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)). |
-> [Tensor v2 t] | values: The tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim. |
-> Tensor Value t | output: A tensor with the concatenation of values stacked along the concat_dim dimension. |
Concatenates tensors along one dimension.
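A small sketch (assumptions as in the earlier examples; the first argument names the dimension to concatenate along):

```haskell
import Data.Int (Int32)
import qualified TensorFlow.Ops as TF

-- Concatenate two vectors along dimension 0: ==> [1, 2, 3, 4]
joined = TF.concat (TF.scalar (0 :: Int32))
                   [TF.vector [1, 2 :: Float], TF.vector [3, 4]]
```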
constant :: forall a. TensorType a => Shape -> [a] -> Tensor Value a Source
Create a constant tensor.
The values should be in row major order, e.g.,
    element 0:   index (0, ..., 0)
    element 1:   index (0, ..., 1)
    ...
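For example, a minimal sketch (assuming `OverloadedLists` so the `Shape` argument can be written as a list literal, as in the module description's `variable []`):

```haskell
{-# LANGUAGE OverloadedLists #-}
import qualified TensorFlow.Ops as TF

-- A 2x3 constant; the six values are consumed in row-major order:
--   [[1, 2, 3],
--    [4, 5, 6]]
twoByThree = TF.constant [2, 3] [1, 2, 3, 4, 5, 6 :: Float]
```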
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) | |
=> Tensor v1 t | x |
-> Tensor v2 t | y |
-> Tensor Value Bool | z |
Returns the truth value of (x == y) element-wise.
*NOTE*: `Equal` supports broadcasting. More about broadcasting here.
expandDims :: TensorType t => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t Source
initializedVariable :: forall a. TensorType a => Tensor Value a -> Build (Tensor Ref a) Source
Creates a variable initialized to the given value. Initialization happens the next time the session runs.
zeroInitializedVariable :: (TensorType a, Num a) => Shape -> Build (Tensor Ref a) Source
Creates a zero-initialized variable with the given shape.
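A hedged sketch combining the two (assuming `OverloadedLists` for `Shape` literals and that `Build`, `Tensor`, and `Ref` can be imported from the modules named below, which is an assumption about this package's layout):

```haskell
{-# LANGUAGE OverloadedLists #-}
import qualified TensorFlow.Ops as TF
import TensorFlow.Build (Build)         -- assumed module for Build
import TensorFlow.Tensor (Tensor, Ref)  -- assumed module for Tensor/Ref

-- A 2x2 variable seeded from an explicit constant, and a zero-filled
-- 10x10 variable; both are Build actions because variables are stateful.
weights :: Build (Tensor Ref Float)
weights = TF.initializedVariable (TF.constant [2, 2] [1, 2, 3, 4])

biases :: Build (Tensor Ref Float)
biases = TF.zeroInitializedVariable [10, 10]
```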
Arguments
:: TensorType t | |
=> Tensor v1 Int32 | dims: 1-D. Represents the shape of the output tensor. |
-> Tensor v2 t | value: 0-D (scalar). Value to fill the returned tensor. |
-> Tensor Value t | output |
Creates a tensor filled with a scalar value.
This operation creates a tensor of shape `dims` and fills it with `value`.
For example:
```prettyprint
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
                     [9, 9, 9]]
```
Arguments
:: (TensorType t, TensorType tI, OneOf '[Int32, Int64, Word8] tI) | |
=> Tensor v1 tI | indices: A tensor of indices. |
-> Tensor v2 Int32 | depth: A scalar defining the depth of the one hot dimension. |
-> Tensor v3 t | on_value: A scalar defining the value to fill in output when `indices[j] = i`. |
-> Tensor v4 t | off_value: A scalar defining the value to fill in output when `indices[j] != i`. |
-> Tensor Value t | output: The one-hot tensor. |
Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.

If the input `indices` is rank `N`, the output will have rank `N+1`. The new
axis is created at dimension `axis` (default: the new axis is appended at the
end).

If `indices` is a scalar, the output shape will be a vector of length `depth`.

If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
Examples:
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```
Then output is `[4 x 3]`:
```
output =
  [5.0 0.0 0.0]  // one_hot(0)
  [0.0 0.0 5.0]  // one_hot(2)
  [0.0 0.0 0.0]  // one_hot(-1)
  [0.0 5.0 0.0]  // one_hot(1)
```
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 0.0
off_value = 3.0
axis = 0
```
Then output is `[3 x 4]`:
```
output =
  [0.0 3.0 3.0 3.0]
  [3.0 3.0 3.0 0.0]
  [3.0 3.0 3.0 3.0]
  [3.0 0.0 3.0 3.0]
//  ^                one_hot(0)
//      ^            one_hot(2)
//          ^        one_hot(-1)
//              ^    one_hot(1)
```

Suppose that
```
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```
Then output is `[2 x 2 x 3]`:
```
output =
  [
    [1.0, 0.0, 0.0]  // one_hot(0)
    [0.0, 0.0, 1.0]  // one_hot(2)
  ][
    [0.0, 1.0, 0.0]  // one_hot(1)
    [0.0, 0.0, 0.0]  // one_hot(-1)
  ]
```
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) | |
=> Tensor v1 t | a |
-> Tensor v2 t | b |
-> Tensor Value t | product |
Multiply the matrix "a" by the matrix "b".
The inputs must be two-dimensional matrices, and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transpose_b is true).
*Note*: The default kernel implementation for MatMul on GPUs uses cublas.
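A hedged sketch of a 2x3 by 3x2 product (assuming `OverloadedLists` for the `Shape` literals):

```haskell
{-# LANGUAGE OverloadedLists #-}
import qualified TensorFlow.Ops as TF

-- Row-major constants: a is 2x3, b is 3x2.
-- matMul a b ==> [[ 58,  64],
--                 [139, 154]]
productAB = TF.matMul (TF.constant [2, 3] [1, 2, 3, 4, 5, 6 :: Float])
                      (TF.constant [3, 2] [7, 8, 9, 10, 11, 12])
```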
matTranspose :: forall a v. TensorType a => Tensor v a -> Tensor Value a Source
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) | |
=> Tensor v1 t | input: The tensor to reduce. |
-> Tensor v2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Value t | output: The reduced tensor. |
Computes the mean of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `reduction_indices`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
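A short sketch (same assumptions as the earlier examples; the reduction indices pick the second dimension):

```haskell
{-# LANGUAGE OverloadedLists #-}
import Data.Int (Int32)
import qualified TensorFlow.Ops as TF

-- Mean of each row of a 2x3 matrix: [[1, 2, 3], [4, 5, 6]] ==> [2, 5]
rowMeans = TF.mean (TF.constant [2, 3] [1, 2, 3, 4, 5, 6 :: Float])
                   (TF.vector [1 :: Int32])
```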
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) | |
=> Tensor v1 t | x |
-> Tensor v2 t | y |
-> Tensor Value t | z |
Returns x * y element-wise.
*NOTE*: `Mul` supports broadcasting. More about broadcasting here.
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) | |
=> Tensor v1 t | x |
-> Tensor Value t | y |
Computes numerical negative value element-wise.
I.e., \(y = -x\).
Arguments
:: TensorType t | |
=> [Tensor v1 t] | values: Must be of same shape and type. |
-> Tensor Value t | output: The packed tensor. |
Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.

Packs the `N` tensors in `values` into a tensor with rank one higher than each
tensor in `values`, by packing them along the `axis` dimension.
Given a list of tensors of shape `(A, B, C)`:
if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```prettyprint
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of `unpack`.
placeholder :: forall a. TensorType a => Shape -> Build (Tensor Value a) Source
Arguments
:: (TensorType tidx, OneOf '[Int32, Int64, Double, Float] tidx) | |
=> Tensor v1 tidx | start: 0-D (scalar). First entry in the sequence. |
-> Tensor v2 tidx | limit: 0-D (scalar). Upper limit of sequence, exclusive. |
-> Tensor v3 tidx | delta: 0-D (scalar). Optional. Default is 1. Number that increments start. |
-> Tensor Value tidx | output: 1-D. |
Creates a sequence of numbers.
This operation creates a sequence of numbers that begins at `start` and
extends by increments of `delta` up to but not including `limit`.
For example:
```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
```
reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32 Source
Helper function for reduction ops (translation of math_ops.reduced_shape).
Arguments
:: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) | |
=> Tensor v1 t | features |
-> Tensor Value t | activations |
Computes rectified linear: `max(features, 0)`.
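For example (a minimal sketch with the module imported qualified):

```haskell
import qualified TensorFlow.Ops as TF

-- relu([-1, 2, -3]) ==> [0, 2, 0]
rectified = TF.relu (TF.vector [-1, 2, -3 :: Float])
```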
Arguments
:: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) | |
=> Tensor v1 t | gradients: The backpropagated gradients to the corresponding Relu operation. |
-> Tensor v2 t | features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently). |
-> Tensor Value t | backprops: `gradients * (features > 0)`. |
Computes rectified linear gradients for a Relu operation.
Arguments
:: (TensorType t, TensorType tshape, OneOf '[Int32, Int64] tshape) | |
=> Tensor v1 t | tensor |
-> Tensor v2 tshape | shape: Defines the shape of the output tensor. |
-> Tensor Value t | output |
Reshapes a tensor.
Given `tensor`, this operation returns a tensor that has the same values
as `tensor` with shape `shape`.

If one component of `shape` is the special value -1, the size of that dimension
is computed so that the total size remains constant. In particular, a shape
of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.

If `shape` is 1-D or higher, then the operation returns a tensor with shape
`shape` filled with the values of `tensor`. In this case, the number of elements
implied by `shape` must be the same as the number of elements in `tensor`.
For example:
```prettyprint
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor 't' is [[[1, 1], [2, 2]],
#                [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor 't' is [[[1, 1, 1],
#                 [2, 2, 2]],
#                [[3, 3, 3],
#                 [4, 4, 4]],
#                [[5, 5, 5],
#                 [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape

# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
                              [2, 2, 2],
                              [3, 3, 3]],
                             [[4, 4, 4],
                              [5, 5, 5],
                              [6, 6, 6]]]

# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
Arguments
:: TensorType a | |
=> ByteString | File path. |
-> Tensor Ref a | Tensor to restore. |
-> Build ControlNode |
Restore a tensor's value from a checkpoint file.
Arguments
:: TensorType a | |
=> ByteString | File path. |
-> ByteString | Tensor name override. |
-> Tensor Ref a | Tensor to restore. |
-> Build ControlNode |
Restore a tensor's value from a checkpoint file.
This version allows restoring from a checkpoint file that uses a different tensor name than the variable.
Arguments
:: TensorType a | |
=> ByteString | File path. |
-> [Tensor v a] | Tensors to save. |
-> Build ControlNode |
scalar :: forall a. TensorType a => a -> Tensor Value a Source
Create a constant scalar.
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) | |
=> Tensor v1 t | x |
-> Tensor Value t | y |
Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
Arguments
:: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) | |
=> Tensor v1 t | input |
-> Tensor Value out_type | output |
Returns the size of a tensor.
This operation returns an integer representing the number of elements in `input`.
For example:
```prettyprint
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```
Arguments
:: (TensorType t, OneOf '[Word16, Double, Float] t) | |
=> Tensor v1 t | logits: 2-D with shape `[batch_size, num_classes]`. |
-> Tensor Value t | softmax: Same shape as logits. |
Computes softmax activations.
For each batch `i` and class `j` we have

`softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))`
Arguments
:: (TensorType t, OneOf '[Word16, Double, Float] t) | |
=> Tensor v1 t | features: batch_size x num_classes matrix |
-> Tensor v2 t | labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid probability distribution. |
-> (Tensor Value t, Tensor Value t) | (loss, backprop) |
Computes softmax cross entropy cost and gradients to backpropagate.
Inputs are the logits, not probabilities.
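A hedged sketch for a batch of two examples over three classes (assuming `OverloadedLists` for the `Shape` literals; each label row is one-hot, hence a valid probability distribution):

```haskell
{-# LANGUAGE OverloadedLists #-}
import qualified TensorFlow.Ops as TF

-- 2x3 logits and matching one-hot labels.
logits = TF.constant [2, 3] [0.1, 0.2, 0.7, 0.8, 0.1, 0.1 :: Float]
labels = TF.constant [2, 3] [0, 0, 1, 1, 0, 0 :: Float]

-- The resulting pair is (loss, backprop), as documented above.
lossAndBackprop = TF.softmaxCrossEntropyWithLogits logits labels
```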
Arguments
:: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) | |
=> Tensor v1 tindices | sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. |
-> Tensor v2 tindices | output_shape: 1-D. Shape of the dense output tensor. |
-> Tensor v3 t | sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices. |
-> Tensor v4 t | default_value: Scalar value to set for indices not specified in sparse_indices. |
-> Tensor Value t | dense: Dense output tensor of shape output_shape. |
Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```prettyprint
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values` is a
scalar, all sparse indices are set to this single value.

Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is true, these properties
are checked during execution.
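A minimal sketch (same import assumptions as the earlier examples): scatter the values 5 and 7 into positions 0 and 2 of a length-4 vector, filling elsewhere with 0.

```haskell
import Data.Int (Int32)
import qualified TensorFlow.Ops as TF

-- dense ==> [5, 0, 7, 0]
dense = TF.sparseToDense (TF.vector [0, 2 :: Int32])   -- sparse_indices
                         (TF.vector [4 :: Int32])      -- output_shape
                         (TF.vector [5, 7 :: Float])   -- sparse_values
                         (TF.scalar 0)                 -- default_value
```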
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) | |
=> Tensor v1 t | x |
-> Tensor v2 t | y |
-> Tensor Value t | z |
Returns x - y element-wise.
*NOTE*: `Sub` supports broadcasting. More about broadcasting here.
Arguments
:: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) | |
=> Tensor v1 t | input: The tensor to reduce. |
-> Tensor v2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Value t | output: The reduced tensor. |
Computes the sum of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `reduction_indices`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
Arguments
:: (TensorType t, TensorType tperm, OneOf '[Int32, Int64] tperm) | |
=> Tensor v1 t | x |
-> Tensor v2 tperm | perm |
-> Tensor Value t | y |
Shuffle dimensions of x according to a permutation.
The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
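A short sketch (assuming `OverloadedLists` for the `Shape` literal): swap the two dimensions of a 2x3 matrix.

```haskell
{-# LANGUAGE OverloadedLists #-}
import Data.Int (Int32)
import qualified TensorFlow.Ops as TF

-- transpose of [[1, 2, 3], [4, 5, 6]] with perm [1, 0]
--   ==> [[1, 4], [2, 5], [3, 6]]
transposed = TF.transpose (TF.constant [2, 3] [1, 2, 3, 4, 5, 6 :: Float])
                          (TF.vector [1, 0 :: Int32])
```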
Arguments
:: TensorType dtype | |
=> Shape | shape: The shape of the variable tensor. |
-> Build (Tensor Ref dtype) | ref: A reference to the variable tensor. |
Holds state in the form of a tensor that persists across steps.
Outputs a ref to the tensor state so it may be read or modified. TODO(zhifengc/mrry): Adds a pointer to a more detail document about sharing states in tensorflow.
vector :: TensorType a => [a] -> Tensor Value a Source
Create a constant vector.
Arguments
:: TensorType t | |
=> Tensor v1 t | x: a tensor of type T. |
-> Tensor Value t | y: a tensor of the same shape and type as x but filled with zeros. |
Returns a tensor of zeros with the same shape and type as x.