| Safe Haskell | None |
|---|---|
| Language | Haskell2010 |
- abort :: forall m'. MonadBuild m' => m' ControlNode
- abort' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode
- abs :: forall v'1 t. OneOf `[Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- abs' :: forall v'1 t. OneOf `[Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- accumulatorApplyGradient :: forall v'2 v'3 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' ControlNode
- accumulatorApplyGradient' :: forall v'2 v'3 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' ControlNode
- accumulatorNumAccumulated :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value Int32)
- accumulatorNumAccumulated' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
- accumulatorSetGlobalStep :: forall v'2 m'. MonadBuild m' => Tensor Ref ByteString -> Tensor v'2 Int64 -> m' ControlNode
- accumulatorSetGlobalStep' :: forall v'2 m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> m' ControlNode
- accumulatorTakeGradient :: forall v'2 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype)
- accumulatorTakeGradient' :: forall v'2 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype)
- acos :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- acos' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- add :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- add' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- addManySparseToTensorsMap :: forall v'1 v'2 v'3 t m'. (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
- addManySparseToTensorsMap' :: forall v'1 v'2 v'3 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
- addN :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => [Tensor v'1 t] -> Tensor Build t
- addN' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> [Tensor v'1 t] -> Tensor Build t
- addSparseToTensorsMap :: forall v'1 v'2 v'3 t m'. (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
- addSparseToTensorsMap' :: forall v'1 v'2 v'3 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64)
- adjustContrast :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float
- adjustContrast' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float
- adjustContrastv2 :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- adjustContrastv2' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- adjustHue :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- adjustHue' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- adjustSaturation :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- adjustSaturation' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- all :: forall v'1 v'2 tidx. OneOf `[Int32, Int64]` tidx => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
- all' :: forall v'1 v'2 tidx. OneOf `[Int32, Int64]` tidx => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
- allCandidateSampler :: Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- allCandidateSampler' :: OpParams -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- any :: forall v'1 v'2 tidx. OneOf `[Int32, Int64]` tidx => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
- any' :: forall v'1 v'2 tidx. OneOf `[Int32, Int64]` tidx => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool
- applyAdadelta :: forall v'4 v'5 v'6 v'7 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t)
- applyAdadelta' :: forall v'4 v'5 v'6 v'7 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t)
- applyAdagrad :: forall v'3 v'4 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t)
- applyAdagrad' :: forall v'3 v'4 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t)
- applyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t)
- applyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t)
- applyAdam :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t)
- applyAdam' :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t)
- applyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
- applyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
- applyFtrl :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
- applyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
- applyGradientDescent :: forall v'2 v'3 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t)
- applyGradientDescent' :: forall v'2 v'3 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t)
- applyMomentum :: forall v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
- applyMomentum' :: forall v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
- applyProximalAdagrad :: forall v'3 v'4 v'5 v'6 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t)
- applyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t)
- applyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
- applyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t)
- applyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
- applyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t)
- argMax :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
- argMax' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
- argMin :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
- argMin' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64
- asString :: forall v'1 t. OneOf `[Complex Float, Bool, Int32, Int64, Int8, Double, Float]` t => Tensor v'1 t -> Tensor Build ByteString
- asString' :: forall v'1 t. OneOf `[Complex Float, Bool, Int32, Int64, Int8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build ByteString
- asin :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- asin' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- assert :: forall v'1 v'2 t m'. (MonadBuild m', TensorTypes t) => Tensor v'1 Bool -> TensorList v'2 t -> m' ControlNode
- assert' :: forall v'1 v'2 t m'. (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Bool -> TensorList v'2 t -> m' ControlNode
- assign :: forall v'2 t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
- assign' :: forall v'2 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
- assignAdd :: forall v'2 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
- assignAdd' :: forall v'2 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
- assignAddVariableOp :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' ControlNode
- assignAddVariableOp' :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' ControlNode
- assignSub :: forall v'2 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
- assignSub' :: forall v'2 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t)
- assignVariableOp :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' ControlNode
- assignVariableOp' :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' ControlNode
- atan :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- atan' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- audioSummary :: Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString
- audioSummary' :: OpParams -> Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString
- audioSummaryV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString
- audioSummaryV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString
- avgPool :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- avgPool' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- avgPool3D :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- avgPool3D' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- avgPool3DGrad :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
- avgPool3DGrad' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
- avgPoolGrad :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
- avgPoolGrad' :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
- barrier :: forall m'. MonadBuild m' => [DataType] -> m' (Tensor Ref ByteString)
- barrier' :: forall m'. MonadBuild m' => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
- barrierClose :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' ControlNode
- barrierClose' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' ControlNode
- barrierIncompleteSize :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value Int32)
- barrierIncompleteSize' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
- barrierInsertMany :: forall v'2 v'3 t m'. (MonadBuild m', TensorType t) => Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' ControlNode
- barrierInsertMany' :: forall v'2 v'3 t m'. (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' ControlNode
- barrierReadySize :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value Int32)
- barrierReadySize' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
- barrierTakeMany :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types)
- barrierTakeMany' :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types)
- batchCholesky :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- batchCholesky' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- batchCholeskyGrad :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchCholeskyGrad' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchIFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchIFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchIFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchIFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchIFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchIFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- batchMatMul :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchMatMul' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchMatrixBandPart :: forall v'1 v'2 v'3 t. TensorType t => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
- batchMatrixBandPart' :: forall v'1 v'2 v'3 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
- batchMatrixDeterminant :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- batchMatrixDeterminant' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- batchMatrixDiag :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- batchMatrixDiag' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- batchMatrixDiagPart :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- batchMatrixDiagPart' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- batchMatrixInverse :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- batchMatrixInverse' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- batchMatrixSetDiag :: forall v'1 v'2 t. TensorType t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchMatrixSetDiag' :: forall v'1 v'2 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchMatrixSolve :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchMatrixSolve' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchMatrixSolveLs :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
- batchMatrixSolveLs' :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
- batchMatrixTriangularSolve :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchMatrixTriangularSolve' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- batchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t
- batchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t
- batchNormWithGlobalNormalizationGrad :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
- batchNormWithGlobalNormalizationGrad' :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
- batchSelfAdjointEig :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- batchSelfAdjointEig' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- batchSelfAdjointEigV2 :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> (Tensor Build t, Tensor Build t)
- batchSelfAdjointEigV2' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t)
- batchSvd :: forall v'1 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
- batchSvd' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
- batchToSpace :: forall v'1 v'2 t tidx. (TensorType t, OneOf `[Int32, Int64]` tidx) => Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- batchToSpace' :: forall v'1 v'2 t tidx. (TensorType t, OneOf `[Int32, Int64]` tidx) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- batchToSpaceND :: forall v'1 v'2 v'3 t tblock_shape tcrops. (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t
- batchToSpaceND' :: forall v'1 v'2 v'3 t tblock_shape tcrops. (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t
- betainc :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- betainc' :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- biasAdd :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- biasAdd' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- biasAddGrad :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- biasAddGrad' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- biasAddV1 :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- biasAddV1' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- bitcast :: forall v'1 t type'. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` type') => Tensor v'1 t -> Tensor Build type'
- bitcast' :: forall v'1 t type'. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` type') => OpParams -> Tensor v'1 t -> Tensor Build type'
- broadcastArgs :: forall v'1 v'2 t. OneOf `[Int32, Int64]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- broadcastArgs' :: forall v'1 v'2 t. OneOf `[Int32, Int64]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- broadcastGradientArgs :: forall v'1 v'2 t. OneOf `[Int32, Int64]` t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
- broadcastGradientArgs' :: forall v'1 v'2 t. OneOf `[Int32, Int64]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
- cTCBeamSearchDecoder :: Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)
- cTCBeamSearchDecoder' :: OpParams -> Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)
- cTCGreedyDecoder :: Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)
- cTCGreedyDecoder' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)
- cTCLoss :: Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float)
- cTCLoss' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float)
- cast :: forall v'1 srcT dstT. (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT
- cast' :: forall v'1 srcT dstT. (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT
- ceil :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- ceil' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- checkNumerics :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- checkNumerics' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- cholesky :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- cholesky' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- choleskyGrad :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- choleskyGrad' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- complex :: forall v'1 v'2 t tout. (OneOf `[Double, Float]` t, OneOf `[Complex Double, Complex Float]` tout) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout
- complex' :: forall v'1 v'2 t tout. (OneOf `[Double, Float]` t, OneOf `[Complex Double, Complex Float]` tout) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout
- complexAbs :: forall v'1 t tout. (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) => Tensor v'1 t -> Tensor Build tout
- complexAbs' :: forall v'1 t tout. (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) => OpParams -> Tensor v'1 t -> Tensor Build tout
- computeAccidentalHits :: Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)
- computeAccidentalHits' :: OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)
- concat :: forall v'1 v'2 t. TensorType t => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t
- concat' :: forall v'1 v'2 t. TensorType t => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t
- concatOffset :: Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32]
- concatOffset' :: OpParams -> Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32]
- concatV2 :: forall v'1 v'2 t tidx. (TensorType t, OneOf `[Int32, Int64]` tidx) => [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t
- concatV2' :: forall v'1 v'2 t tidx. (TensorType t, OneOf `[Int32, Int64]` tidx) => OpParams -> [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t
- conditionalAccumulator :: forall m'. MonadBuild m' => DataType -> Shape -> m' (Tensor Ref ByteString)
- conditionalAccumulator' :: forall m'. MonadBuild m' => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString)
- conj :: forall v'1 t. OneOf `[Complex Double, Complex Float]` t => Tensor v'1 t -> Tensor Build t
- conj' :: forall v'1 t. OneOf `[Complex Double, Complex Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- const :: forall dtype. TensorType dtype => Tensor Build dtype
- const' :: forall dtype. TensorType dtype => OpParams -> Tensor Build dtype
- controlTrigger :: forall m'. MonadBuild m' => m' ControlNode
- controlTrigger' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode
- conv2D :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- conv2D' :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- conv2DBackpropFilter :: forall v'1 v'2 v'3 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- conv2DBackpropFilter' :: forall v'1 v'2 v'3 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- conv2DBackpropInput :: forall v'1 v'2 v'3 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- conv2DBackpropInput' :: forall v'1 v'2 v'3 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- conv3D :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- conv3D' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- conv3DBackpropFilter :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- conv3DBackpropFilter' :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- conv3DBackpropFilterV2 :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- conv3DBackpropFilterV2' :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- conv3DBackpropInput :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- conv3DBackpropInput' :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- conv3DBackpropInputV2 :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- conv3DBackpropInputV2' :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- copy :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- copy' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- copyHost :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- copyHost' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- cos :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- cos' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- countUpTo :: forall t m'. (MonadBuild m', OneOf `[Int32, Int64]` t) => Int64 -> Tensor Ref t -> m' (Tensor Value t)
- countUpTo' :: forall t m'. (MonadBuild m', OneOf `[Int32, Int64]` t) => OpParams -> Int64 -> Tensor Ref t -> m' (Tensor Value t)
- cropAndResize :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float
- cropAndResize' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float
- cropAndResizeGradBoxes :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float
- cropAndResizeGradBoxes' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float
- cropAndResizeGradImage :: forall v'1 v'2 v'3 v'4 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
- cropAndResizeGradImage' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
- cross :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- cross' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- cumprod :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- cumprod' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- cumsum :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- cumsum' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- debugIdentity :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- debugIdentity' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- debugNanCount :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build Int64
- debugNanCount' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build Int64
- debugNumericSummary :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build Double
- debugNumericSummary' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build Double
- decodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString
- decodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
- decodeCSV :: forall v'1 v'2 oUT_TYPE. OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE => Tensor v'1 ByteString -> TensorList v'2 oUT_TYPE -> TensorList Build oUT_TYPE
- decodeCSV' :: forall v'1 v'2 oUT_TYPE. OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE => OpParams -> Tensor v'1 ByteString -> TensorList v'2 oUT_TYPE -> TensorList Build oUT_TYPE
- decodeGif :: Tensor v'1 ByteString -> Tensor Build Word8
- decodeGif' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8
- decodeJSONExample :: Tensor v'1 ByteString -> Tensor Build ByteString
- decodeJSONExample' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
- decodeJpeg :: Tensor v'1 ByteString -> Tensor Build Word8
- decodeJpeg' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8
- decodePng :: forall v'1 dtype. OneOf `[Word16, Word8]` dtype => Tensor v'1 ByteString -> Tensor Build dtype
- decodePng' :: forall v'1 dtype. OneOf `[Word16, Word8]` dtype => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype
- decodeRaw :: forall v'1 out_type. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type => Tensor v'1 ByteString -> Tensor Build out_type
- decodeRaw' :: forall v'1 out_type. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type
- deleteSessionTensor :: forall v'1 m'. MonadBuild m' => Tensor v'1 ByteString -> m' ControlNode
- deleteSessionTensor' :: forall v'1 m'. MonadBuild m' => OpParams -> Tensor v'1 ByteString -> m' ControlNode
- denseToDenseSetOperation :: forall v'1 v'2 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- denseToDenseSetOperation' :: forall v'1 v'2 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- denseToSparseSetOperation :: forall v'1 v'2 v'3 v'4 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- denseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- depthToSpace :: forall v'1 t. TensorType t => Int64 -> Tensor v'1 t -> Tensor Build t
- depthToSpace' :: forall v'1 t. TensorType t => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t
- depthwiseConv2dNative :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- depthwiseConv2dNative' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- depthwiseConv2dNativeBackpropFilter :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- depthwiseConv2dNativeBackpropFilter' :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- depthwiseConv2dNativeBackpropInput :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- depthwiseConv2dNativeBackpropInput' :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- dequantize :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
- dequantize' :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
- deserializeManySparse :: forall v'1 dtype. TensorType dtype => Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)
- deserializeManySparse' :: forall v'1 dtype. TensorType dtype => OpParams -> Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)
- destroyTemporaryVariable :: forall t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Value t)
- destroyTemporaryVariable' :: forall t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Value t)
- diag :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- diag' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- diagPart :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- diagPart' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- digamma :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- digamma' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- dilation2D :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- dilation2D' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- dilation2DBackpropFilter :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- dilation2DBackpropFilter' :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- dilation2DBackpropInput :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- dilation2DBackpropInput' :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- div :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- div' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- drawBoundingBoxes :: forall v'1 v'2 t. OneOf `[Word16, Float]` t => Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t
- drawBoundingBoxes' :: forall v'1 v'2 t. OneOf `[Word16, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t
- dynamicPartition :: forall v'1 v'2 t. TensorType t => Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t]
- dynamicPartition' :: forall v'1 v'2 t. TensorType t => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t]
- dynamicStitch :: forall v'1 v'2 t. TensorType t => [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t
- dynamicStitch' :: forall v'1 v'2 t. TensorType t => OpParams -> [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t
- editDistance :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. TensorType t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float
- editDistance' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. TensorType t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float
- elu :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- elu' :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- eluGrad :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- eluGrad' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- encodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString
- encodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
- encodeJpeg :: Tensor v'1 Word8 -> Tensor Build ByteString
- encodeJpeg' :: OpParams -> Tensor v'1 Word8 -> Tensor Build ByteString
- encodePng :: forall v'1 t. OneOf `[Word16, Word8]` t => Tensor v'1 t -> Tensor Build ByteString
- encodePng' :: forall v'1 t. OneOf `[Word16, Word8]` t => OpParams -> Tensor v'1 t -> Tensor Build ByteString
- enter :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- enter' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- equal :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- equal' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- erf :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- erf' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- erfc :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- erfc' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- exit :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- exit' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- exp :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- exp' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- expandDims :: forall v'1 v'2 t tdim. (TensorType t, OneOf `[Int32, Int64]` tdim) => Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t
- expandDims' :: forall v'1 v'2 t tdim. (TensorType t, OneOf `[Int32, Int64]` tdim) => OpParams -> Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t
- expm1 :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- expm1' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- extractGlimpse :: Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float
- extractGlimpse' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float
- extractImagePatches :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- extractImagePatches' :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- fFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- fFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- fFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- fFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- fFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- fFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- fIFOQueue :: forall m'. MonadBuild m' => [DataType] -> m' (Tensor Ref ByteString)
- fIFOQueue' :: forall m'. MonadBuild m' => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
- fIFOQueueV2 :: forall m'. MonadBuild m' => [DataType] -> m' ResourceHandle
- fIFOQueueV2' :: forall m'. MonadBuild m' => OpParams -> [DataType] -> m' ResourceHandle
- fact :: Tensor Build ByteString
- fact' :: OpParams -> Tensor Build ByteString
- fakeQuantWithMinMaxArgs :: Tensor v'1 Float -> Tensor Build Float
- fakeQuantWithMinMaxArgs' :: OpParams -> Tensor v'1 Float -> Tensor Build Float
- fakeQuantWithMinMaxArgsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- fakeQuantWithMinMaxArgsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float
- fakeQuantWithMinMaxVars :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
- fakeQuantWithMinMaxVars' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
- fakeQuantWithMinMaxVarsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
- fakeQuantWithMinMaxVarsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
- fakeQuantWithMinMaxVarsPerChannel :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
- fakeQuantWithMinMaxVarsPerChannel' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
- fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
- fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float)
- fakeQueue :: forall m'. MonadBuild m' => ResourceHandle -> m' (Tensor Ref ByteString)
- fakeQueue' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' (Tensor Ref ByteString)
- fill :: forall v'1 v'2 t. TensorType t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
- fill' :: forall v'1 v'2 t. TensorType t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t
- fixedLengthRecordReader :: forall m'. MonadBuild m' => Int64 -> m' (Tensor Ref ByteString)
- fixedLengthRecordReader' :: forall m'. MonadBuild m' => OpParams -> Int64 -> m' (Tensor Ref ByteString)
- fixedLengthRecordReaderV2 :: forall m'. MonadBuild m' => Int64 -> m' ResourceHandle
- fixedLengthRecordReaderV2' :: forall m'. MonadBuild m' => OpParams -> Int64 -> m' ResourceHandle
- fixedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- fixedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- floor :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- floor' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- floorDiv :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- floorDiv' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- floorMod :: forall v'1 v'2 t. OneOf `[Int32, Int64, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- floorMod' :: forall v'1 v'2 t. OneOf `[Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- fractionalAvgPool :: forall v'1 t. OneOf `[Int32, Int64, Double, Float]` t => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
- fractionalAvgPool' :: forall v'1 t. OneOf `[Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
- fractionalAvgPoolGrad :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int32, Int64, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t
- fractionalAvgPoolGrad' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t
- fractionalMaxPool :: forall v'1 t. OneOf `[Int32, Int64, Double, Float]` t => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
- fractionalMaxPool' :: forall v'1 t. OneOf `[Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)
- fractionalMaxPoolGrad :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Int32, Int64, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t
- fractionalMaxPoolGrad' :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t
- fusedBatchNorm :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
- fusedBatchNorm' :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
- fusedBatchNormGrad :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
- fusedBatchNormGrad' :: forall v'1 v'2 v'3 v'4 v'5 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)
- fusedPadConv2D :: forall v'1 v'2 v'3 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- fusedPadConv2D' :: forall v'1 v'2 v'3 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t
- fusedResizeAndPadConv2D :: forall v'1 v'2 v'3 v'4 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t
- fusedResizeAndPadConv2D' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t
- gather :: forall v'1 v'2 tparams tindices. (TensorType tparams, OneOf `[Int32, Int64]` tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
- gather' :: forall v'1 v'2 tparams tindices. (TensorType tparams, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
- gatherNd :: forall v'1 v'2 tparams tindices. (TensorType tparams, OneOf `[Int32, Int64]` tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
- gatherNd' :: forall v'1 v'2 tparams tindices. (TensorType tparams, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
- getSessionHandle :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build ByteString
- getSessionHandle' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build ByteString
- getSessionTensor :: forall v'1 dtype. TensorType dtype => Tensor v'1 ByteString -> Tensor Build dtype
- getSessionTensor' :: forall v'1 dtype. TensorType dtype => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype
- greater :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- greater' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- greaterEqual :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- greaterEqual' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- hSVToRGB :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- hSVToRGB' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- hashTable :: forall m'. MonadBuild m' => DataType -> DataType -> m' (Tensor Ref ByteString)
- hashTable' :: forall m'. MonadBuild m' => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString)
- histogramSummary :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
- histogramSummary' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
- iFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- iFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- iFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- iFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- iFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- iFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float)
- identity :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- identity' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- identityReader :: forall m'. MonadBuild m' => m' (Tensor Ref ByteString)
- identityReader' :: forall m'. MonadBuild m' => OpParams -> m' (Tensor Ref ByteString)
- identityReaderV2 :: forall m'. MonadBuild m' => m' ResourceHandle
- identityReaderV2' :: forall m'. MonadBuild m' => OpParams -> m' ResourceHandle
- igamma :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- igamma' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- igammac :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- igammac' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- imag :: forall v'1 t tout. (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) => Tensor v'1 t -> Tensor Build tout
- imag' :: forall v'1 t tout. (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) => OpParams -> Tensor v'1 t -> Tensor Build tout
- imageSummary :: forall v'1 v'2 t. OneOf `[Word16, Word8, Float]` t => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
- imageSummary' :: forall v'1 v'2 t. OneOf `[Word16, Word8, Float]` t => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
- immutableConst :: forall dtype. TensorType dtype => Shape -> Tensor Build dtype
- immutableConst' :: forall dtype. TensorType dtype => OpParams -> Shape -> Tensor Build dtype
- inTopK :: forall v'1 v'2 t. OneOf `[Int32, Int64]` t => Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool
- inTopK' :: forall v'1 v'2 t. OneOf `[Int32, Int64]` t => OpParams -> Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool
- initializeTable :: forall v'2 v'3 tkey tval m'. (MonadBuild m', TensorType tkey, TensorType tval) => Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' ControlNode
- initializeTable' :: forall v'2 v'3 tkey tval m'. (MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' ControlNode
- initializeTableFromTextFile :: forall v'2 m'. MonadBuild m' => Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' ControlNode
- initializeTableFromTextFile' :: forall v'2 m'. MonadBuild m' => OpParams -> Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' ControlNode
- inv :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- inv' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- invGrad :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- invGrad' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- invertPermutation :: forall v'1 t. OneOf `[Int32, Int64]` t => Tensor v'1 t -> Tensor Build t
- invertPermutation' :: forall v'1 t. OneOf `[Int32, Int64]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- isFinite :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build Bool
- isFinite' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build Bool
- isInf :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build Bool
- isInf' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build Bool
- isNan :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build Bool
- isNan' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build Bool
- isVariableInitialized :: forall dtype m'. (MonadBuild m', TensorType dtype) => Tensor Ref dtype -> m' (Tensor Value Bool)
- isVariableInitialized' :: forall dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref dtype -> m' (Tensor Value Bool)
- l2Loss :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- l2Loss' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- lRN :: forall v'1 t. OneOf `[Word16, Float]` t => Tensor v'1 t -> Tensor Build t
- lRN' :: forall v'1 t. OneOf `[Word16, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- lRNGrad :: forall v'1 v'2 v'3 t. OneOf `[Word16, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- lRNGrad' :: forall v'1 v'2 v'3 t. OneOf `[Word16, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- learnedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- learnedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- less :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- less' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- lessEqual :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- lessEqual' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- lgamma :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- lgamma' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- linSpace :: forall v'1 v'2 v'3 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t
- linSpace' :: forall v'1 v'2 v'3 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t
- listDiff :: forall v'1 v'2 t out_idx. (TensorType t, OneOf `[Int32, Int64]` out_idx) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx)
- listDiff' :: forall v'1 v'2 t out_idx. (TensorType t, OneOf `[Int32, Int64]` out_idx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx)
- log :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- log' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- log1p :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- log1p' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- logSoftmax :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- logSoftmax' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- logUniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- logUniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- logicalAnd :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
- logicalAnd' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
- logicalNot :: Tensor v'1 Bool -> Tensor Build Bool
- logicalNot' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool
- logicalOr :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
- logicalOr' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool
- lookupTableExport :: forall tkeys tvalues m'. (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor Ref ByteString -> m' (Tensor Value tkeys, Tensor Value tvalues)
- lookupTableExport' :: forall tkeys tvalues m'. (MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor Ref ByteString -> m' (Tensor Value tkeys, Tensor Value tvalues)
- lookupTableFind :: forall v'2 v'3 tin tout m'. (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout)
- lookupTableFind' :: forall v'2 v'3 tin tout m'. (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout)
- lookupTableImport :: forall v'2 v'3 tin tout m'. (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' ControlNode
- lookupTableImport' :: forall v'2 v'3 tin tout m'. (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' ControlNode
- lookupTableInsert :: forall v'2 v'3 tin tout m'. (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' ControlNode
- lookupTableInsert' :: forall v'2 v'3 tin tout m'. (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' ControlNode
- lookupTableSize :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value Int64)
- lookupTableSize' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64)
- loopCond :: Tensor v'1 Bool -> Tensor Build Bool
- loopCond' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool
- matMul :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- matMul' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- matchingFiles :: Tensor v'1 ByteString -> Tensor Build ByteString
- matchingFiles' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
- matrixBandPart :: forall v'1 v'2 v'3 t. TensorType t => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
- matrixBandPart' :: forall v'1 v'2 v'3 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t
- matrixDeterminant :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- matrixDeterminant' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- matrixDiag :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- matrixDiag' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- matrixDiagPart :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- matrixDiagPart' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- matrixInverse :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- matrixInverse' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- matrixSetDiag :: forall v'1 v'2 t. TensorType t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- matrixSetDiag' :: forall v'1 v'2 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- matrixSolve :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- matrixSolve' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- matrixSolveLs :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
- matrixSolveLs' :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t
- matrixTriangularSolve :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- matrixTriangularSolve' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- max :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- max' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- maxPool :: forall v'1 t. OneOf `[Word16, Float]` t => Tensor v'1 t -> Tensor Build t
- maxPool' :: forall v'1 t. OneOf `[Word16, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- maxPool3D :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- maxPool3D' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- maxPool3DGrad :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t
- maxPool3DGrad' :: forall v'1 v'2 v'3 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t
- maxPoolGrad :: forall v'1 v'2 v'3 t. OneOf `[Word16, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- maxPoolGrad' :: forall v'1 v'2 v'3 t. OneOf `[Word16, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- maxPoolGradWithArgmax :: forall v'1 v'2 v'3 targmax t. (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t
- maxPoolGradWithArgmax' :: forall v'1 v'2 v'3 targmax t. (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t
- maxPoolWithArgmax :: forall v'1 targmax t. (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) => Tensor v'1 t -> (Tensor Build t, Tensor Build targmax)
- maxPoolWithArgmax' :: forall v'1 targmax t. (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build targmax)
- maximum :: forall v'1 v'2 t. OneOf `[Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- maximum' :: forall v'1 v'2 t. OneOf `[Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- mean :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- mean' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- merge :: forall v'1 t. TensorType t => [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32)
- merge' :: forall v'1 t. TensorType t => OpParams -> [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32)
- mergeSummary :: [Tensor v'1 ByteString] -> Tensor Build ByteString
- mergeSummary' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString
- mergeV2Checkpoints :: forall v'1 v'2 m'. MonadBuild m' => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' ControlNode
- mergeV2Checkpoints' :: forall v'1 v'2 m'. MonadBuild m' => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' ControlNode
- min :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- min' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- minimum :: forall v'1 v'2 t. OneOf `[Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- minimum' :: forall v'1 v'2 t. OneOf `[Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- mirrorPad :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- mirrorPad' :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- mirrorPadGrad :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- mirrorPadGrad' :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- mod :: forall v'1 v'2 t. OneOf `[Int32, Int64, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- mod' :: forall v'1 v'2 t. OneOf `[Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- mul :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- mul' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- multinomial :: forall v'1 v'2 t m'. (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64)
- multinomial' :: forall v'1 v'2 t m'. (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64)
- mutableDenseHashTable :: forall v'1 key_dtype m'. (MonadBuild m', TensorType key_dtype) => DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString)
- mutableDenseHashTable' :: forall v'1 key_dtype m'. (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString)
- mutableHashTable :: forall m'. MonadBuild m' => DataType -> DataType -> m' (Tensor Ref ByteString)
- mutableHashTable' :: forall m'. MonadBuild m' => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString)
- mutableHashTableOfTensors :: forall m'. MonadBuild m' => DataType -> DataType -> m' (Tensor Ref ByteString)
- mutableHashTableOfTensors' :: forall m'. MonadBuild m' => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString)
- neg :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- neg' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- negTrain :: forall v'3 v'4 v'5 m'. MonadBuild m' => Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' ControlNode
- negTrain' :: forall v'3 v'4 v'5 m'. MonadBuild m' => OpParams -> Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' ControlNode
- nextIteration :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- nextIteration' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- noOp :: forall m'. MonadBuild m' => m' ControlNode
- noOp' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode
- nonMaxSuppression :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32
- nonMaxSuppression' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32
- notEqual :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- notEqual' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool
- oneHot :: forall v'1 v'2 v'3 v'4 t tI. (TensorType t, OneOf `[Int32, Int64, Word8]` tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
- oneHot' :: forall v'1 v'2 v'3 v'4 t tI. (TensorType t, OneOf `[Int32, Int64, Word8]` tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
- pack :: forall v'1 t. TensorType t => [Tensor v'1 t] -> Tensor Build t
- pack' :: forall v'1 t. TensorType t => OpParams -> [Tensor v'1 t] -> Tensor Build t
- pad :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- pad' :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- paddingFIFOQueue :: forall m'. MonadBuild m' => [DataType] -> m' (Tensor Ref ByteString)
- paddingFIFOQueue' :: forall m'. MonadBuild m' => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
- paddingFIFOQueueV2 :: forall m'. MonadBuild m' => [DataType] -> m' ResourceHandle
- paddingFIFOQueueV2' :: forall m'. MonadBuild m' => OpParams -> [DataType] -> m' ResourceHandle
- parallelConcat :: forall v'1 t. TensorType t => Shape -> [Tensor v'1 t] -> Tensor Build t
- parallelConcat' :: forall v'1 t. TensorType t => OpParams -> Shape -> [Tensor v'1 t] -> Tensor Build t
- parameterizedTruncatedNormal :: forall v'1 v'2 v'3 v'4 v'5 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype)
- parameterizedTruncatedNormal' :: forall v'1 v'2 v'3 v'4 v'5 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => OpParams -> Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype)
- parseExample :: forall v'1 v'2 v'3 v'4 v'5 sparse_types tdense. (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList v'5 tdense -> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)
- parseExample' :: forall v'1 v'2 v'3 v'4 v'5 sparse_types tdense. (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList v'5 tdense -> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)
- parseSingleSequenceExample :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 context_sparse_types tcontext_dense feature_list_dense_types feature_list_sparse_types. (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList v'7 tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)
- parseSingleSequenceExample' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 context_sparse_types tcontext_dense feature_list_dense_types feature_list_sparse_types. (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList v'7 tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)
- parseTensor :: forall v'1 out_type. TensorType out_type => Tensor v'1 ByteString -> Tensor Build out_type
- parseTensor' :: forall v'1 out_type. TensorType out_type => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type
- placeholder :: forall dtype. TensorType dtype => Tensor Build dtype
- placeholder' :: forall dtype. TensorType dtype => OpParams -> Tensor Build dtype
- placeholderV2 :: forall dtype. TensorType dtype => Shape -> Tensor Build dtype
- placeholderV2' :: forall dtype. TensorType dtype => OpParams -> Shape -> Tensor Build dtype
- placeholderWithDefault :: forall v'1 dtype. TensorType dtype => Shape -> Tensor v'1 dtype -> Tensor Build dtype
- placeholderWithDefault' :: forall v'1 dtype. TensorType dtype => OpParams -> Shape -> Tensor v'1 dtype -> Tensor Build dtype
- polygamma :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- polygamma' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- pow :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- pow' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- preventGradient :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- preventGradient' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- print :: forall v'1 v'2 t u m'. (MonadBuild m', TensorType t, TensorTypes u) => Tensor v'1 t -> TensorList v'2 u -> m' (Tensor Value t)
- print' :: forall v'1 v'2 t u m'. (MonadBuild m', TensorType t, TensorTypes u) => OpParams -> Tensor v'1 t -> TensorList v'2 u -> m' (Tensor Value t)
- priorityQueue :: forall m'. MonadBuild m' => m' (Tensor Ref ByteString)
- priorityQueue' :: forall m'. MonadBuild m' => OpParams -> m' (Tensor Ref ByteString)
- priorityQueueV2 :: forall m'. MonadBuild m' => m' ResourceHandle
- priorityQueueV2' :: forall m'. MonadBuild m' => OpParams -> m' ResourceHandle
- prod :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- prod' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- qr :: forall v'1 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => Tensor v'1 t -> (Tensor Build t, Tensor Build t)
- qr' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t)
- quantizeAndDequantize :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- quantizeAndDequantize' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- quantizeDownAndShrinkRange :: forall v'1 v'2 v'3 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizeDownAndShrinkRange' :: forall v'1 v'2 v'3 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizeV2 :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizeV2' :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedAvgPool :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedAvgPool' :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedBatchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 v'14 v'15 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedBatchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 v'14 v'15 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedBiasAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 out_type. (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedBiasAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 out_type. (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedConcat :: forall v'1 v'2 v'3 v'4 t. TensorType t => Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedConcat' :: forall v'1 v'2 v'3 v'4 t. TensorType t => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedConv2D :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedConv2D' :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedInstanceNorm :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedInstanceNorm' :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedMatMul :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput. (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)
- quantizedMatMul' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput. (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)
- quantizedMaxPool :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedMaxPool' :: forall v'1 v'2 v'3 t. OneOf `[Int16, Int32, Word16, Word8]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedRelu :: forall v'1 v'2 v'3 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedRelu' :: forall v'1 v'2 v'3 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedRelu6 :: forall v'1 v'2 v'3 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedRelu6' :: forall v'1 v'2 v'3 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedReluX :: forall v'1 v'2 v'3 v'4 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedReluX' :: forall v'1 v'2 v'3 v'4 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- quantizedReshape :: forall v'1 v'2 v'3 v'4 t tshape. (TensorType t, OneOf `[Int32, Int64]` tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- quantizedReshape' :: forall v'1 v'2 v'3 v'4 t tshape. (TensorType t, OneOf `[Int32, Int64]` tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float)
- queueClose :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' ControlNode
- queueClose' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' ControlNode
- queueCloseV2 :: forall m'. MonadBuild m' => ResourceHandle -> m' ControlNode
- queueCloseV2' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' ControlNode
- queueDequeue :: forall component_types m'. (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> m' (TensorList Value component_types)
- queueDequeue' :: forall component_types m'. (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> m' (TensorList Value component_types)
- queueDequeueMany :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueMany' :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueManyV2 :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueManyV2' :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueUpTo :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueUpTo' :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueUpToV2 :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueUpToV2' :: forall v'2 component_types m'. (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList Value component_types)
- queueDequeueV2 :: forall component_types m'. (MonadBuild m', TensorTypes component_types) => ResourceHandle -> m' (TensorList Value component_types)
- queueDequeueV2' :: forall component_types m'. (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> m' (TensorList Value component_types)
- queueEnqueue :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList v'2 tcomponents -> m' ControlNode
- queueEnqueue' :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList v'2 tcomponents -> m' ControlNode
- queueEnqueueMany :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList v'2 tcomponents -> m' ControlNode
- queueEnqueueMany' :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList v'2 tcomponents -> m' ControlNode
- queueEnqueueManyV2 :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList v'2 tcomponents -> m' ControlNode
- queueEnqueueManyV2' :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList v'2 tcomponents -> m' ControlNode
- queueEnqueueV2 :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList v'2 tcomponents -> m' ControlNode
- queueEnqueueV2' :: forall v'2 tcomponents m'. (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList v'2 tcomponents -> m' ControlNode
- queueSize :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value Int32)
- queueSize' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32)
- queueSizeV2 :: forall m'. MonadBuild m' => ResourceHandle -> m' (Tensor Value Int32)
- queueSizeV2' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' (Tensor Value Int32)
- rGBToHSV :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- rGBToHSV' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- randomCrop :: forall v'1 v'2 t m'. (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) => Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t)
- randomCrop' :: forall v'1 v'2 t m'. (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t)
- randomGamma :: forall v'1 v'2 s t m'. (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) => Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t)
- randomGamma' :: forall v'1 v'2 s t m'. (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) => OpParams -> Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t)
- randomShuffle :: forall v'1 t m'. (MonadBuild m', TensorType t) => Tensor v'1 t -> m' (Tensor Value t)
- randomShuffle' :: forall v'1 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -> m' (Tensor Value t)
- randomShuffleQueue :: forall m'. MonadBuild m' => [DataType] -> m' (Tensor Ref ByteString)
- randomShuffleQueue' :: forall m'. MonadBuild m' => OpParams -> [DataType] -> m' (Tensor Ref ByteString)
- randomShuffleQueueV2 :: forall m'. MonadBuild m' => [DataType] -> m' ResourceHandle
- randomShuffleQueueV2' :: forall m'. MonadBuild m' => OpParams -> [DataType] -> m' ResourceHandle
- randomStandardNormal :: forall v'1 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => Tensor v'1 t -> m' (Tensor Value dtype)
- randomStandardNormal' :: forall v'1 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
- randomUniform :: forall v'1 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => Tensor v'1 t -> m' (Tensor Value dtype)
- randomUniform' :: forall v'1 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
- randomUniformInt :: forall v'1 v'2 v'3 tout t m'. (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) => Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout)
- randomUniformInt' :: forall v'1 v'2 v'3 tout t m'. (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) => OpParams -> Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout)
- range :: forall v'1 v'2 v'3 tidx. OneOf `[Int32, Int64, Double, Float]` tidx => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
- range' :: forall v'1 v'2 v'3 tidx. OneOf `[Int32, Int64, Double, Float]` tidx => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx
- rank :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build Int32
- rank' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build Int32
- readFile :: Tensor v'1 ByteString -> Tensor Build ByteString
- readFile' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
- readVariableOp :: forall dtype m'. (MonadBuild m', TensorType dtype) => ResourceHandle -> m' (Tensor Value dtype)
- readVariableOp' :: forall dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> m' (Tensor Value dtype)
- readerNumRecordsProduced :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value Int64)
- readerNumRecordsProduced' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64)
- readerNumRecordsProducedV2 :: forall m'. MonadBuild m' => ResourceHandle -> m' (Tensor Value Int64)
- readerNumRecordsProducedV2' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' (Tensor Value Int64)
- readerNumWorkUnitsCompleted :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value Int64)
- readerNumWorkUnitsCompleted' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64)
- readerNumWorkUnitsCompletedV2 :: forall m'. MonadBuild m' => ResourceHandle -> m' (Tensor Value Int64)
- readerNumWorkUnitsCompletedV2' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' (Tensor Value Int64)
- readerRead :: forall m'. MonadBuild m' => Tensor Ref ByteString -> Tensor Ref ByteString -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerRead' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerReadUpTo :: forall v'3 m'. MonadBuild m' => Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerReadUpTo' :: forall v'3 m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerReadUpToV2 :: forall v'3 m'. MonadBuild m' => ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerReadUpToV2' :: forall v'3 m'. MonadBuild m' => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerReadV2 :: forall m'. MonadBuild m' => ResourceHandle -> ResourceHandle -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerReadV2' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> ResourceHandle -> m' (Tensor Value ByteString, Tensor Value ByteString)
- readerReset :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' ControlNode
- readerReset' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' ControlNode
- readerResetV2 :: forall m'. MonadBuild m' => ResourceHandle -> m' ControlNode
- readerResetV2' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' ControlNode
- readerRestoreState :: forall v'2 m'. MonadBuild m' => Tensor Ref ByteString -> Tensor v'2 ByteString -> m' ControlNode
- readerRestoreState' :: forall v'2 m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' ControlNode
- readerRestoreStateV2 :: forall v'2 m'. MonadBuild m' => ResourceHandle -> Tensor v'2 ByteString -> m' ControlNode
- readerRestoreStateV2' :: forall v'2 m'. MonadBuild m' => OpParams -> ResourceHandle -> Tensor v'2 ByteString -> m' ControlNode
- readerSerializeState :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' (Tensor Value ByteString)
- readerSerializeState' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' (Tensor Value ByteString)
- readerSerializeStateV2 :: forall m'. MonadBuild m' => ResourceHandle -> m' (Tensor Value ByteString)
- readerSerializeStateV2' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' (Tensor Value ByteString)
- real :: forall v'1 t tout. (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) => Tensor v'1 t -> Tensor Build tout
- real' :: forall v'1 t tout. (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) => OpParams -> Tensor v'1 t -> Tensor Build tout
- realDiv :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- realDiv' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- reciprocal :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- reciprocal' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- reciprocalGrad :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- reciprocalGrad' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- recordInput :: forall m'. MonadBuild m' => m' (Tensor Value ByteString)
- recordInput' :: forall m'. MonadBuild m' => OpParams -> m' (Tensor Value ByteString)
- reduceJoin :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
- reduceJoin' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
- refEnter :: forall t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
- refEnter' :: forall t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
- refExit :: forall t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
- refExit' :: forall t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
- refIdentity :: forall t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
- refIdentity' :: forall t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
- refMerge :: forall t m'. (MonadBuild m', TensorType t) => [Tensor Ref t] -> m' (Tensor Ref t, Tensor Value Int32)
- refMerge' :: forall t m'. (MonadBuild m', TensorType t) => OpParams -> [Tensor Ref t] -> m' (Tensor Ref t, Tensor Value Int32)
- refNextIteration :: forall t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t)
- refNextIteration' :: forall t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t)
- refSelect :: forall v'1 t m'. (MonadBuild m', TensorType t) => Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t)
- refSelect' :: forall v'1 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t)
- refSwitch :: forall v'2 t m'. (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 Bool -> m' (Tensor Ref t, Tensor Ref t)
- refSwitch' :: forall v'2 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 Bool -> m' (Tensor Ref t, Tensor Ref t)
- relu :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- relu' :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- relu6 :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- relu6' :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- relu6Grad :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- relu6Grad' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- reluGrad :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- reluGrad' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- requantizationRange :: forall v'1 v'2 v'3 tinput. OneOf `[Int16, Int32, Word16, Word8]` tinput => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float)
- requantizationRange' :: forall v'1 v'2 v'3 tinput. OneOf `[Int16, Int32, Word16, Word8]` tinput => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float)
- requantize :: forall v'1 v'2 v'3 v'4 v'5 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- requantize' :: forall v'1 v'2 v'3 v'4 v'5 tinput out_type. (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)
- reshape :: forall v'1 v'2 t tshape. (TensorType t, OneOf `[Int32, Int64]` tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t
- reshape' :: forall v'1 v'2 t tshape. (TensorType t, OneOf `[Int32, Int64]` tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t
- resizeArea :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
- resizeArea' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
- resizeBicubic :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
- resizeBicubic' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
- resizeBilinear :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
- resizeBilinear' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float
- resizeBilinearGrad :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t
- resizeBilinearGrad' :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t
- resizeNearestNeighbor :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
- resizeNearestNeighbor' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
- resizeNearestNeighborGrad :: forall v'1 v'2 t. OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
- resizeNearestNeighborGrad' :: forall v'1 v'2 t. OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
- resourceApplyAdadelta :: forall v'4 v'5 v'6 v'7 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' ControlNode
- resourceApplyAdadelta' :: forall v'4 v'5 v'6 v'7 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' ControlNode
- resourceApplyAdagrad :: forall v'3 v'4 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' ControlNode
- resourceApplyAdagrad' :: forall v'3 v'4 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' ControlNode
- resourceApplyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' ControlNode
- resourceApplyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' ControlNode
- resourceApplyAdam :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' ControlNode
- resourceApplyAdam' :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' ControlNode
- resourceApplyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' ControlNode
- resourceApplyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' ControlNode
- resourceApplyFtrl :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' ControlNode
- resourceApplyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' ControlNode
- resourceApplyGradientDescent :: forall v'2 v'3 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' ControlNode
- resourceApplyGradientDescent' :: forall v'2 v'3 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' ControlNode
- resourceApplyMomentum :: forall v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' ControlNode
- resourceApplyMomentum' :: forall v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' ControlNode
- resourceApplyProximalAdagrad :: forall v'3 v'4 v'5 v'6 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' ControlNode
- resourceApplyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' ControlNode
- resourceApplyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' ControlNode
- resourceApplyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' ControlNode
- resourceApplyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' ControlNode
- resourceApplyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 t m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' ControlNode
- resourceGather :: forall v'2 dtype tindices m'. (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype)
- resourceGather' :: forall v'2 dtype tindices m'. (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype)
- resourceScatterAdd :: forall v'2 v'3 dtype tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' ControlNode
- resourceScatterAdd' :: forall v'2 v'3 dtype tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' ControlNode
- resourceSparseApplyAdadelta :: forall v'4 v'5 v'6 v'7 v'8 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' ControlNode
- resourceSparseApplyAdadelta' :: forall v'4 v'5 v'6 v'7 v'8 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' ControlNode
- resourceSparseApplyAdagrad :: forall v'3 v'4 v'5 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' ControlNode
- resourceSparseApplyAdagrad' :: forall v'3 v'4 v'5 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' ControlNode
- resourceSparseApplyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' ControlNode
- resourceSparseApplyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' ControlNode
- resourceSparseApplyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' ControlNode
- resourceSparseApplyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' ControlNode
- resourceSparseApplyFtrl :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' ControlNode
- resourceSparseApplyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' ControlNode
- resourceSparseApplyMomentum :: forall v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' ControlNode
- resourceSparseApplyMomentum' :: forall v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' ControlNode
- resourceSparseApplyProximalAdagrad :: forall v'3 v'4 v'5 v'6 v'7 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' ControlNode
- resourceSparseApplyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 v'7 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' ControlNode
- resourceSparseApplyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' ControlNode
- resourceSparseApplyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' ControlNode
- resourceSparseApplyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' ControlNode
- resourceSparseApplyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' ControlNode
- restore :: forall v'1 v'2 dt. TensorType dt => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt
- restore' :: forall v'1 v'2 dt. TensorType dt => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt
- restoreSlice :: forall v'1 v'2 v'3 dt. TensorType dt => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt
- restoreSlice' :: forall v'1 v'2 v'3 dt. TensorType dt => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt
- restoreV2 :: forall v'1 v'2 v'3 dtypes. TensorTypes dtypes => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList Build dtypes
- restoreV2' :: forall v'1 v'2 v'3 dtypes. TensorTypes dtypes => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList Build dtypes
- reverse :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t
- reverse' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t
- reverseSequence :: forall v'1 v'2 t tlen. (TensorType t, OneOf `[Int32, Int64]` tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t
- reverseSequence' :: forall v'1 v'2 t tlen. (TensorType t, OneOf `[Int32, Int64]` tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t
- reverseV2 :: forall v'1 v'2 tidx t. (OneOf `[Int32, Int64]` tidx, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- reverseV2' :: forall v'1 v'2 tidx t. (OneOf `[Int32, Int64]` tidx, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- rint :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- rint' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- round :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- round' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- rsqrt :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- rsqrt' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- rsqrtGrad :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- rsqrtGrad' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- sampleDistortedBoundingBox :: forall v'1 v'2 t m'. (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) => Tensor v'1 t -> Tensor v'2 Float -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)
- sampleDistortedBoundingBox' :: forall v'1 v'2 t m'. (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> m' (Tensor Value t, Tensor Value t, Tensor Value Float)
- save :: forall v'1 v'2 v'3 t m'. (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList v'3 t -> m' ControlNode
- save' :: forall v'1 v'2 v'3 t m'. (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList v'3 t -> m' ControlNode
- saveSlices :: forall v'1 v'2 v'3 v'4 t m'. (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList v'4 t -> m' ControlNode
- saveSlices' :: forall v'1 v'2 v'3 v'4 t m'. (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList v'4 t -> m' ControlNode
- saveV2 :: forall v'1 v'2 v'3 v'4 dtypes m'. (MonadBuild m', TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList v'4 dtypes -> m' ControlNode
- saveV2' :: forall v'1 v'2 v'3 v'4 dtypes m'. (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList v'4 dtypes -> m' ControlNode
- scalarSummary :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
- scalarSummary' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString
- scatterAdd :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterAdd' :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterDiv :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterDiv' :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterMul :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterMul' :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterNd :: forall v'1 v'2 v'3 t tindices. (TensorType t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t
- scatterNd' :: forall v'1 v'2 v'3 t tindices. (TensorType t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t
- scatterNdAdd :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterNdAdd' :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterNdSub :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterNdSub' :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterNdUpdate :: forall v'2 v'3 t tindices m'. (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterNdUpdate' :: forall v'2 v'3 t tindices m'. (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterSub :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterSub' :: forall v'2 v'3 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterUpdate :: forall v'2 v'3 t tindices m'. (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- scatterUpdate' :: forall v'2 v'3 t tindices m'. (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
- sdcaFprint :: Tensor v'1 ByteString -> Tensor Build Int64
- sdcaFprint' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Int64
- sdcaOptimizer :: Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])
- sdcaOptimizer' :: OpParams -> Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])
- sdcaShrinkL1 :: forall m'. MonadBuild m' => Float -> Float -> [Tensor Ref Float] -> m' ControlNode
- sdcaShrinkL1' :: forall m'. MonadBuild m' => OpParams -> Float -> Float -> [Tensor Ref Float] -> m' ControlNode
- segmentMax :: forall v'1 v'2 t tindices. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentMax' :: forall v'1 v'2 t tindices. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentMean :: forall v'1 v'2 t tindices. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentMean' :: forall v'1 v'2 t tindices. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentMin :: forall v'1 v'2 t tindices. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentMin' :: forall v'1 v'2 t tindices. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentProd :: forall v'1 v'2 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentProd' :: forall v'1 v'2 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentSum :: forall v'1 v'2 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- segmentSum' :: forall v'1 v'2 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t
- select :: forall v'1 v'2 v'3 t. TensorType t => Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- select' :: forall v'1 v'2 v'3 t. TensorType t => OpParams -> Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t
- selfAdjointEig :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor Build t
- selfAdjointEig' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- selfAdjointEigV2 :: forall v'1 t. OneOf `[Double, Float]` t => Tensor v'1 t -> (Tensor Build t, Tensor Build t)
- selfAdjointEigV2' :: forall v'1 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t)
- serializeManySparse :: forall v'1 v'2 v'3 t. TensorType t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
- serializeManySparse' :: forall v'1 v'2 v'3 t. TensorType t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
- serializeSparse :: forall v'1 v'2 v'3 t. TensorType t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
- serializeSparse' :: forall v'1 v'2 v'3 t. TensorType t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString
- setSize :: forall v'1 v'2 v'3 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32
- setSize' :: forall v'1 v'2 v'3 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32
- shape :: forall v'1 t out_type. (TensorType t, OneOf `[Int32, Int64]` out_type) => Tensor v'1 t -> Tensor Build out_type
- shape' :: forall v'1 t out_type. (TensorType t, OneOf `[Int32, Int64]` out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type
- shapeN :: forall v'1 t out_type. (TensorType t, OneOf `[Int32, Int64]` out_type) => [Tensor v'1 t] -> [Tensor Build out_type]
- shapeN' :: forall v'1 t out_type. (TensorType t, OneOf `[Int32, Int64]` out_type) => OpParams -> [Tensor v'1 t] -> [Tensor Build out_type]
- shardedFilename :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString
- shardedFilename' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString
- shardedFilespec :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
- shardedFilespec' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString
- sigmoid :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- sigmoid' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- sigmoidGrad :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- sigmoidGrad' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- sign :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- sign' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- sin :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- sin' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- size :: forall v'1 t out_type. (TensorType t, OneOf `[Int32, Int64]` out_type) => Tensor v'1 t -> Tensor Build out_type
- size' :: forall v'1 t out_type. (TensorType t, OneOf `[Int32, Int64]` out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type
- skipgram :: forall m'. MonadBuild m' => Int64 -> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)
- skipgram' :: forall m'. MonadBuild m' => OpParams -> Int64 -> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)
- slice :: forall v'1 v'2 v'3 t index. (TensorType t, OneOf `[Int32, Int64]` index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t
- slice' :: forall v'1 v'2 v'3 t index. (TensorType t, OneOf `[Int32, Int64]` index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t
- softmax :: forall v'1 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- softmax' :: forall v'1 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- softmaxCrossEntropyWithLogits :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
- softmaxCrossEntropyWithLogits' :: forall v'1 v'2 t. OneOf `[Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t)
- softplus :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- softplus' :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- softplusGrad :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- softplusGrad' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- softsign :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- softsign' :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- softsignGrad :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- softsignGrad' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- spaceToBatch :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- spaceToBatch' :: forall v'1 v'2 t tpaddings. (TensorType t, OneOf `[Int32, Int64]` tpaddings) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t
- spaceToBatchND :: forall v'1 v'2 v'3 t tblock_shape tpaddings. (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t
- spaceToBatchND' :: forall v'1 v'2 v'3 t tblock_shape tpaddings. (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t
- spaceToDepth :: forall v'1 t. TensorType t => Int64 -> Tensor v'1 t -> Tensor Build t
- spaceToDepth' :: forall v'1 t. TensorType t => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t
- sparseAccumulatorApplyGradient :: forall v'2 v'3 v'4 v'5 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' ControlNode
- sparseAccumulatorApplyGradient' :: forall v'2 v'3 v'4 v'5 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => OpParams -> Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' ControlNode
- sparseAccumulatorTakeGradient :: forall v'2 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)
- sparseAccumulatorTakeGradient' :: forall v'2 dtype m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)
- sparseAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t treal. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- sparseAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t treal. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- sparseAddGrad :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t)
- sparseAddGrad' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t)
- sparseApplyAdadelta :: forall v'4 v'5 v'6 v'7 v'8 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t)
- sparseApplyAdadelta' :: forall v'4 v'5 v'6 v'7 v'8 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t)
- sparseApplyAdagrad :: forall v'3 v'4 v'5 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t)
- sparseApplyAdagrad' :: forall v'3 v'4 v'5 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t)
- sparseApplyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t)
- sparseApplyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t)
- sparseApplyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t)
- sparseApplyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t)
- sparseApplyFtrl :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
- sparseApplyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t)
- sparseApplyMomentum :: forall v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t)
- sparseApplyMomentum' :: forall v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t)
- sparseApplyProximalAdagrad :: forall v'3 v'4 v'5 v'6 v'7 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t)
- sparseApplyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 v'7 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t)
- sparseApplyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t)
- sparseApplyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 v'6 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t)
- sparseApplyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t)
- sparseApplyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m'. (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t)
- sparseConcat :: forall v'1 v'2 v'3 t. TensorType t => Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- sparseConcat' :: forall v'1 v'2 v'3 t. TensorType t => OpParams -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- sparseConditionalAccumulator :: forall m'. MonadBuild m' => DataType -> Shape -> m' (Tensor Ref ByteString)
- sparseConditionalAccumulator' :: forall m'. MonadBuild m' => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString)
- sparseDenseCwiseAdd :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseDenseCwiseAdd' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseDenseCwiseDiv :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseDenseCwiseDiv' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseDenseCwiseMul :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseDenseCwiseMul' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseMatMul :: forall v'1 v'2 ta tb. (OneOf `[Word16, Float]` ta, OneOf `[Word16, Float]` tb) => Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float
- sparseMatMul' :: forall v'1 v'2 ta tb. (OneOf `[Word16, Float]` ta, OneOf `[Word16, Float]` tb) => OpParams -> Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float
- sparseReduceSum :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t
- sparseReduceSum' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t
- sparseReduceSumSparse :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- sparseReduceSumSparse' :: forall v'1 v'2 v'3 v'4 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- sparseReorder :: forall v'1 v'2 v'3 t. TensorType t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t)
- sparseReorder' :: forall v'1 v'2 v'3 t. TensorType t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t)
- sparseReshape :: Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64)
- sparseReshape' :: OpParams -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64)
- sparseSegmentMean :: forall v'1 v'2 v'3 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
- sparseSegmentMean' :: forall v'1 v'2 v'3 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
- sparseSegmentMeanGrad :: forall v'1 v'2 v'3 v'4 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
- sparseSegmentMeanGrad' :: forall v'1 v'2 v'3 v'4 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
- sparseSegmentSqrtN :: forall v'1 v'2 v'3 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
- sparseSegmentSqrtN' :: forall v'1 v'2 v'3 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
- sparseSegmentSqrtNGrad :: forall v'1 v'2 v'3 v'4 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
- sparseSegmentSqrtNGrad' :: forall v'1 v'2 v'3 v'4 t tidx. (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t
- sparseSegmentSum :: forall v'1 v'2 v'3 t tidx. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
- sparseSegmentSum' :: forall v'1 v'2 v'3 t tidx. (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t
- sparseSoftmax :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t
- sparseSoftmax' :: forall v'1 v'2 v'3 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t
- sparseSoftmaxCrossEntropyWithLogits :: forall v'1 v'2 t tlabels. (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) => Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t)
- sparseSoftmaxCrossEntropyWithLogits' :: forall v'1 v'2 t tlabels. (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) => OpParams -> Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t)
- sparseSparseMaximum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
- sparseSparseMaximum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
- sparseSparseMinimum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
- sparseSparseMinimum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t)
- sparseSplit :: forall v'1 v'2 v'3 v'4 t. TensorType t => Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])
- sparseSplit' :: forall v'1 v'2 v'3 v'4 t. TensorType t => OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])
- sparseTensorDenseAdd :: forall v'1 v'2 v'3 v'4 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t
- sparseTensorDenseAdd' :: forall v'1 v'2 v'3 v'4 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t
- sparseTensorDenseMatMul :: forall v'1 v'2 v'3 v'4 t. TensorType t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseTensorDenseMatMul' :: forall v'1 v'2 v'3 v'4 t. TensorType t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t
- sparseToDense :: forall v'1 v'2 v'3 v'4 t tindices. (TensorType t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
- sparseToDense' :: forall v'1 v'2 v'3 v'4 t tindices. (TensorType t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t
- sparseToSparseSetOperation :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- sparseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t. OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)
- split :: forall v'1 v'2 t. TensorType t => Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t]
- split' :: forall v'1 v'2 t. TensorType t => OpParams -> Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t]
- splitV :: forall v'1 v'2 v'3 t tlen. (TensorType t, OneOf `[Int32, Int64]` tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t]
- splitV' :: forall v'1 v'2 v'3 t tlen. (TensorType t, OneOf `[Int32, Int64]` tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t]
- sqrt :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- sqrt' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- sqrtGrad :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- sqrtGrad' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- square :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- square' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- squaredDifference :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- squaredDifference' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- squeeze :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- squeeze' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- stack :: forall m'. MonadBuild m' => DataType -> m' (Tensor Ref ByteString)
- stack' :: forall m'. MonadBuild m' => OpParams -> DataType -> m' (Tensor Ref ByteString)
- stackClose :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' ControlNode
- stackClose' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' ControlNode
- stackPop :: forall elem_type m'. (MonadBuild m', TensorType elem_type) => Tensor Ref ByteString -> m' (Tensor Value elem_type)
- stackPop' :: forall elem_type m'. (MonadBuild m', TensorType elem_type) => OpParams -> Tensor Ref ByteString -> m' (Tensor Value elem_type)
- stackPush :: forall v'2 t m'. (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t)
- stackPush' :: forall v'2 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t)
- stage :: forall v'1 dtypes m'. (MonadBuild m', TensorTypes dtypes) => TensorList v'1 dtypes -> m' ControlNode
- stage' :: forall v'1 dtypes m'. (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList v'1 dtypes -> m' ControlNode
- stopGradient :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- stopGradient' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- stridedSlice :: forall v'1 v'2 v'3 v'4 t index. (TensorType t, OneOf `[Int32, Int64]` index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t
- stridedSlice' :: forall v'1 v'2 v'3 v'4 t index. (TensorType t, OneOf `[Int32, Int64]` index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t
- stridedSliceAssign :: forall v'2 v'3 v'4 v'5 t index m'. (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` index) => Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t)
- stridedSliceAssign' :: forall v'2 v'3 v'4 v'5 t index m'. (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` index) => OpParams -> Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t)
- stridedSliceGrad :: forall v'1 v'2 v'3 v'4 v'5 t index. (TensorType t, OneOf `[Int32, Int64]` index) => Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t
- stridedSliceGrad' :: forall v'1 v'2 v'3 v'4 v'5 t index. (TensorType t, OneOf `[Int32, Int64]` index) => OpParams -> Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t
- stringJoin :: [Tensor v'1 ByteString] -> Tensor Build ByteString
- stringJoin' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString
- stringSplit :: Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)
- stringSplit' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)
- stringToHashBucket :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
- stringToHashBucket' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
- stringToHashBucketFast :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
- stringToHashBucketFast' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
- stringToHashBucketStrong :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
- stringToHashBucketStrong' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64
- stringToNumber :: forall v'1 out_type. OneOf `[Int32, Float]` out_type => Tensor v'1 ByteString -> Tensor Build out_type
- stringToNumber' :: forall v'1 out_type. OneOf `[Int32, Float]` out_type => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type
- sub :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- sub' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- substr :: forall v'1 v'2 v'3 t. OneOf `[Int32, Int64]` t => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString
- substr' :: forall v'1 v'2 v'3 t. OneOf `[Int32, Int64]` t => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString
- sum :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- sum' :: forall v'1 v'2 t tidx. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
- svd :: forall v'1 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
- svd' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Double, Float]` t => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
- switch :: forall v'1 v'2 t. TensorType t => Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t)
- switch' :: forall v'1 v'2 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t)
- tFRecordReader :: forall m'. MonadBuild m' => m' (Tensor Ref ByteString)
- tFRecordReader' :: forall m'. MonadBuild m' => OpParams -> m' (Tensor Ref ByteString)
- tFRecordReaderV2 :: forall m'. MonadBuild m' => m' ResourceHandle
- tFRecordReaderV2' :: forall m'. MonadBuild m' => OpParams -> m' ResourceHandle
- takeManySparseFromTensorsMap :: forall v'1 dtype m'. (MonadBuild m', TensorType dtype) => Tensor v'1 Int64 -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)
- takeManySparseFromTensorsMap' :: forall v'1 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Int64 -> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)
- tan :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- tan' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- tanh :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor Build t
- tanh' :: forall v'1 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor Build t
- tanhGrad :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- tanhGrad' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- temporaryVariable :: forall dtype m'. (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
- temporaryVariable' :: forall dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)
- tensorArray :: forall v'1 m'. MonadBuild m' => DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString)
- tensorArray' :: forall v'1 m'. MonadBuild m' => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString)
- tensorArrayClose :: forall m'. MonadBuild m' => Tensor Ref ByteString -> m' ControlNode
- tensorArrayClose' :: forall m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> m' ControlNode
- tensorArrayCloseV2 :: forall v'1 m'. MonadBuild m' => Tensor v'1 ByteString -> m' ControlNode
- tensorArrayCloseV2' :: forall v'1 m'. MonadBuild m' => OpParams -> Tensor v'1 ByteString -> m' ControlNode
- tensorArrayCloseV3 :: forall m'. MonadBuild m' => ResourceHandle -> m' ControlNode
- tensorArrayCloseV3' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' ControlNode
- tensorArrayConcat :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype, Tensor Value Int64)
- tensorArrayConcat' :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype, Tensor Value Int64)
- tensorArrayConcatV2 :: forall v'1 v'2 dtype. TensorType dtype => Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64)
- tensorArrayConcatV2' :: forall v'1 v'2 dtype. TensorType dtype => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64)
- tensorArrayConcatV3 :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value dtype, Tensor Value Int64)
- tensorArrayConcatV3' :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value dtype, Tensor Value Int64)
- tensorArrayGather :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayGather' :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayGatherV2 :: forall v'1 v'2 v'3 dtype. TensorType dtype => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
- tensorArrayGatherV2' :: forall v'1 v'2 v'3 dtype. TensorType dtype => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
- tensorArrayGatherV3 :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayGatherV3' :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayGrad :: forall v'1 v'2 m'. MonadBuild m' => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString)
- tensorArrayGrad' :: forall v'1 v'2 m'. MonadBuild m' => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString)
- tensorArrayGradV2 :: forall v'1 v'2 m'. MonadBuild m' => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString)
- tensorArrayGradV2' :: forall v'1 v'2 m'. MonadBuild m' => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString)
- tensorArrayGradV3 :: forall v'2 m'. MonadBuild m' => ResourceHandle -> Tensor v'2 Float -> m' (ResourceHandle, Tensor Value Float)
- tensorArrayGradV3' :: forall v'2 m'. MonadBuild m' => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' (ResourceHandle, Tensor Value Float)
- tensorArrayPack :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype)
- tensorArrayPack' :: forall v'2 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype)
- tensorArrayRead :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayRead' :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayReadV2 :: forall v'1 v'2 v'3 dtype. TensorType dtype => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
- tensorArrayReadV2' :: forall v'1 v'2 v'3 dtype. TensorType dtype => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
- tensorArrayReadV3 :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayReadV3' :: forall v'2 v'3 dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
- tensorArrayScatter :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArrayScatter' :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArrayScatterV2 :: forall v'1 v'2 v'3 v'4 t. TensorType t => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
- tensorArrayScatterV2' :: forall v'1 v'2 v'3 v'4 t. TensorType t => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
- tensorArrayScatterV3 :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArrayScatterV3' :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArraySize :: forall v'2 m'. MonadBuild m' => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32)
- tensorArraySize' :: forall v'2 m'. MonadBuild m' => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32)
- tensorArraySizeV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32
- tensorArraySizeV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32
- tensorArraySizeV3 :: forall v'2 m'. MonadBuild m' => ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32)
- tensorArraySizeV3' :: forall v'2 m'. MonadBuild m' => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32)
- tensorArraySplit :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArraySplit' :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArraySplitV2 :: forall v'1 v'2 v'3 v'4 t. TensorType t => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float
- tensorArraySplitV2' :: forall v'1 v'2 v'3 v'4 t. TensorType t => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float
- tensorArraySplitV3 :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArraySplitV3' :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArrayUnpack :: forall v'2 v'3 t m'. (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float)
- tensorArrayUnpack' :: forall v'2 v'3 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float)
- tensorArrayV2 :: forall v'1 m'. MonadBuild m' => DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString)
- tensorArrayV2' :: forall v'1 m'. MonadBuild m' => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString)
- tensorArrayV3 :: forall v'1 m'. MonadBuild m' => DataType -> Tensor v'1 Int32 -> m' (ResourceHandle, Tensor Value Float)
- tensorArrayV3' :: forall v'1 m'. MonadBuild m' => OpParams -> DataType -> Tensor v'1 Int32 -> m' (ResourceHandle, Tensor Value Float)
- tensorArrayWrite :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArrayWrite' :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArrayWriteV2 :: forall v'1 v'2 v'3 v'4 t. TensorType t => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
- tensorArrayWriteV2' :: forall v'1 v'2 v'3 v'4 t. TensorType t => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
- tensorArrayWriteV3 :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorArrayWriteV3' :: forall v'2 v'3 v'4 t m'. (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
- tensorSummary :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build ByteString
- tensorSummary' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build ByteString
- textLineReader :: forall m'. MonadBuild m' => m' (Tensor Ref ByteString)
- textLineReader' :: forall m'. MonadBuild m' => OpParams -> m' (Tensor Ref ByteString)
- textLineReaderV2 :: forall m'. MonadBuild m' => m' ResourceHandle
- textLineReaderV2' :: forall m'. MonadBuild m' => OpParams -> m' ResourceHandle
- threadUnsafeUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- threadUnsafeUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- tile :: forall v'1 v'2 t tmultiples. (TensorType t, OneOf `[Int32, Int64]` tmultiples) => Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t
- tile' :: forall v'1 v'2 t tmultiples. (TensorType t, OneOf `[Int32, Int64]` tmultiples) => OpParams -> Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t
- tileGrad :: forall v'1 v'2 t. TensorType t => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
- tileGrad' :: forall v'1 v'2 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
- topK :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32)
- topK' :: forall v'1 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32)
- topKV2 :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32)
- topKV2' :: forall v'1 v'2 t. OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32)
- transpose :: forall v'1 v'2 t tperm. (TensorType t, OneOf `[Int32, Int64]` tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
- transpose' :: forall v'1 v'2 t tperm. (TensorType t, OneOf `[Int32, Int64]` tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
- truncateDiv :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- truncateDiv' :: forall v'1 v'2 t. OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- truncateMod :: forall v'1 v'2 t. OneOf `[Int32, Int64, Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- truncateMod' :: forall v'1 v'2 t. OneOf `[Int32, Int64, Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- truncatedNormal :: forall v'1 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => Tensor v'1 t -> m' (Tensor Value dtype)
- truncatedNormal' :: forall v'1 dtype t m'. (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype)
- uniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- uniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
- unique :: forall v'1 t out_idx. (TensorType t, OneOf `[Int32, Int64]` out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx)
- unique' :: forall v'1 t out_idx. (TensorType t, OneOf `[Int32, Int64]` out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx)
- uniqueWithCounts :: forall v'1 t out_idx. (TensorType t, OneOf `[Int32, Int64]` out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)
- uniqueWithCounts' :: forall v'1 t out_idx. (TensorType t, OneOf `[Int32, Int64]` out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)
- unpack :: forall v'1 t. TensorType t => Int64 -> Tensor v'1 t -> [Tensor Build t]
- unpack' :: forall v'1 t. TensorType t => OpParams -> Int64 -> Tensor v'1 t -> [Tensor Build t]
- unsortedSegmentSum :: forall v'1 v'2 v'3 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t
- unsortedSegmentSum' :: forall v'1 v'2 v'3 t tindices. (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t
- unstage :: forall dtypes m'. (MonadBuild m', TensorTypes dtypes) => m' (TensorList Value dtypes)
- unstage' :: forall dtypes m'. (MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList Value dtypes)
- varHandleOp :: forall m'. MonadBuild m' => DataType -> Shape -> m' ResourceHandle
- varHandleOp' :: forall m'. MonadBuild m' => OpParams -> DataType -> Shape -> m' ResourceHandle
- varIsInitializedOp :: forall m'. MonadBuild m' => ResourceHandle -> m' (Tensor Value Bool)
- varIsInitializedOp' :: forall m'. MonadBuild m' => OpParams -> ResourceHandle -> m' (Tensor Value Bool)
- variable :: forall dtype m'. (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
- variable' :: forall dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)
- variableV2 :: forall dtype m'. (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype)
- variableV2' :: forall dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype)
- where' :: Tensor v'1 Bool -> Tensor Build Int64
- where'' :: OpParams -> Tensor v'1 Bool -> Tensor Build Int64
- wholeFileReader :: forall m'. MonadBuild m' => m' (Tensor Ref ByteString)
- wholeFileReader' :: forall m'. MonadBuild m' => OpParams -> m' (Tensor Ref ByteString)
- wholeFileReaderV2 :: forall m'. MonadBuild m' => m' ResourceHandle
- wholeFileReaderV2' :: forall m'. MonadBuild m' => OpParams -> m' ResourceHandle
- writeFile :: forall v'1 v'2 m'. MonadBuild m' => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' ControlNode
- writeFile' :: forall v'1 v'2 m'. MonadBuild m' => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' ControlNode
- zerosLike :: forall v'1 t. TensorType t => Tensor v'1 t -> Tensor Build t
- zerosLike' :: forall v'1 t. TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t
- zeta :: forall v'1 v'2 t. OneOf `[Double, Float]` t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- zeta' :: forall v'1 v'2 t. OneOf `[Double, Float]` t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- _Arg :: forall t m'. (MonadBuild m', TensorType t) => Int64 -> m' (Tensor Value t)
- _Arg' :: forall t m'. (MonadBuild m', TensorType t) => OpParams -> Int64 -> m' (Tensor Value t)
- _ArrayToList :: forall v'1 t out_types. (TensorType t, TensorTypes out_types) => [Tensor v'1 t] -> TensorList Build out_types
- _ArrayToList' :: forall v'1 t out_types. (TensorType t, TensorTypes out_types) => OpParams -> [Tensor v'1 t] -> TensorList Build out_types
- _HostCast :: forall v'1 srcT dstT. (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT
- _HostCast' :: forall v'1 srcT dstT. (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT
- _HostRecv :: forall tensor_type m'. (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type)
- _HostRecv' :: forall tensor_type m'. (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type)
- _HostSend :: forall v'1 t m'. (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' ControlNode
- _HostSend' :: forall v'1 t m'. (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' ControlNode
- _ListToArray :: forall v'1 tin t. (TensorTypes tin, TensorType t) => Int64 -> TensorList v'1 tin -> [Tensor Build t]
- _ListToArray' :: forall v'1 tin t. (TensorTypes tin, TensorType t) => OpParams -> Int64 -> TensorList v'1 tin -> [Tensor Build t]
- _ParallelConcatStart :: forall dtype m'. (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Value dtype)
- _ParallelConcatStart' :: forall dtype m'. (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Value dtype)
- _ParallelConcatUpdate :: forall v'1 v'2 t. TensorType t => Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- _ParallelConcatUpdate' :: forall v'1 v'2 t. TensorType t => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
- _Recv :: forall tensor_type m'. (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type)
- _Recv' :: forall tensor_type m'. (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type)
- _Retval :: forall v'1 t m'. (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' ControlNode
- _Retval' :: forall v'1 t m'. (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' ControlNode
- _Send :: forall v'1 t m'. (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' ControlNode
- _Send' :: forall v'1 t m'. (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' ControlNode
Documentation
abort :: forall m'. MonadBuild m' => m' ControlNode
Raise an exception to abort the process when called. If exit_without_error is true, the process will exit normally, otherwise it will exit with a SIGABORT signal.
Returns nothing but an exception.
abort' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode
Computes the absolute value of a tensor.
Given a tensor x
, this operation returns a tensor containing the absolute
value of each element in x
. For example, if x is an input element and y is
an output element, this operation computes \(y = |x|\).
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> Tensor v'2 Int64 | local_step: The local_step value at which the gradient was computed. |
-> Tensor v'3 dtype | gradient: A tensor of the gradient to be accumulated. |
-> m' ControlNode |
Applies a gradient to a given accumulator. Does not add if local_step is less
than the accumulator's global_step.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> Tensor v'2 Int64 | local_step: The local_step value at which the gradient was computed. |
-> Tensor v'3 dtype | gradient: A tensor of the gradient to be accumulated. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> m' (Tensor Value Int32) | num_accumulated: The number of gradients aggregated in the given accumulator. |
Returns the number of gradients aggregated in the given accumulators.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> m' (Tensor Value Int32) | num_accumulated: The number of gradients aggregated in the given accumulator. |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> Tensor v'2 Int64 | new_global_step: The new global_step value to set. |
-> m' ControlNode |
Updates the accumulator with a new value for global_step. Logs warning if the
accumulator's value is already higher than new_global_step.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> Tensor v'2 Int64 | new_global_step: The new global_step value to set. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> Tensor v'2 Int32 | num_required: Number of gradients required before we return an aggregate. |
-> m' (Tensor Value dtype) | average: The average of the accumulated gradients. |
Extracts the average gradient in the given ConditionalAccumulator, provided
that sufficient (i.e., more than num_required) gradients have been accumulated. The op blocks until sufficient gradients have been accumulated. If the accumulator has already aggregated more than num_required gradients, it returns the average of the accumulated gradients. Also automatically increments the recorded global_step in the accumulator by 1, and resets the aggregate to 0.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> Tensor v'2 Int32 | num_required: Number of gradients required before we return an aggregate. |
-> m' (Tensor Value dtype) | average: The average of the accumulated gradients. |
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes acos of x element-wise.
:: OneOf `[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns x + y element-wise.
*NOTE*:
Add
supports broadcasting.AddN
does not. More about broadcasting here
:: (MonadBuild m', TensorType t) | |
=> Tensor v'1 Int64 | sparse_indices: 2-D. The |
-> Tensor v'2 t | sparse_values: 1-D. The |
-> Tensor v'3 Int64 | sparse_shape: 1-D. The |
-> m' (Tensor Value Int64) | sparse_handles: 1-D. The handles of the |
Add an N
-minibatch SparseTensor
to a SparseTensorsMap
, return N
handles.
A SparseTensor
of rank R
is represented by three tensors: sparse_indices
,
sparse_values
, and sparse_shape
, where
```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
An N
-minibatch of SparseTensor
objects is represented as a SparseTensor
having a first sparse_indices
column taking values between `[0, N)`, where
the minibatch size `N == sparse_shape[0]`.
The input SparseTensor
must have rank R
greater than 1, and the first
dimension is treated as the minibatch dimension. Elements of the SparseTensor
must be sorted in increasing order of this first dimension. The stored
SparseTensor
objects pointed to by each row of the output sparse_handles
will have rank `R-1`.
The SparseTensor
values can then be read out as part of a minibatch by passing
the given keys as vector elements to TakeManySparseFromTensorsMap
. To ensure
the correct SparseTensorsMap
is accessed, ensure that the same
container
and shared_name
are passed to that Op. If no shared_name
is provided here, instead use the *name* of the Operation created by calling
AddManySparseToTensorsMap
as the shared_name
passed to
TakeManySparseFromTensorsMap
. Ensure the Operations are colocated.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor v'1 Int64 | sparse_indices: 2-D. The |
-> Tensor v'2 t | sparse_values: 1-D. The |
-> Tensor v'3 Int64 | sparse_shape: 1-D. The |
-> m' (Tensor Value Int64) | sparse_handles: 1-D. The handles of the |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> [Tensor v'1 t] | inputs: Must all be the same size and shape. |
-> Tensor Build t | sum |
Add all input tensors element wise.
:: (MonadBuild m', TensorType t) | |
=> Tensor v'1 Int64 | sparse_indices: 2-D. The |
-> Tensor v'2 t | sparse_values: 1-D. The |
-> Tensor v'3 Int64 | sparse_shape: 1-D. The |
-> m' (Tensor Value Int64) | sparse_handle: 0-D. The handle of the |
Add a SparseTensor
to a SparseTensorsMap
, return its handle.
A SparseTensor
is represented by three tensors: sparse_indices
,
sparse_values
, and sparse_shape
.
This operator takes the given SparseTensor
and adds it to a container
object (a SparseTensorsMap
). A unique key within this container is generated
in the form of an int64
, and this is the value that is returned.
The SparseTensor
can then be read out as part of a minibatch by passing
the key as a vector element to TakeManySparseFromTensorsMap
. To ensure
the correct SparseTensorsMap
is accessed, ensure that the same
container
and shared_name
are passed to that Op. If no shared_name
is provided here, instead use the *name* of the Operation created by calling
AddSparseToTensorsMap
as the shared_name
passed to
TakeManySparseFromTensorsMap
. Ensure the Operations are colocated.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor v'1 Int64 | sparse_indices: 2-D. The |
-> Tensor v'2 t | sparse_values: 1-D. The |
-> Tensor v'3 Int64 | sparse_shape: 1-D. The |
-> m' (Tensor Value Int64) | sparse_handle: 0-D. The handle of the |
:: OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t | |
=> Tensor v'1 t | images |
-> Tensor v'2 Float | contrast_factor |
-> Tensor v'3 Float | min_value |
-> Tensor v'4 Float | max_value |
-> Tensor Build Float | output |
Deprecated. Disallowed in GraphDef version >= 2.
:: Tensor v'1 Float | images: Images to adjust. At least 3-D. |
-> Tensor v'2 Float | contrast_factor: A float multiplier for adjusting contrast. |
-> Tensor Build Float | output: The contrast-adjusted image or images. |
Adjust the contrast of one or more images.
images
is a tensor of at least 3 dimensions. The last 3 dimensions are
interpreted as `[height, width, channels]`. The other dimensions only
represent a collection of images, such as `[batch, height, width, channels].`
Contrast is adjusted independently for each channel of each image.
For each channel, the Op first computes the mean of the image pixels in the channel and then adjusts each component of each pixel to `(x - mean) * contrast_factor + mean`.
:: Tensor v'1 Float | images: Images to adjust. At least 3-D. |
-> Tensor v'2 Float | delta: A float delta to add to the hue. |
-> Tensor Build Float | output: The hue-adjusted image or images. |
Adjust the hue of one or more images.
images
is a tensor of at least 3 dimensions. The last dimension is
interpreted as channels, and must be three.
The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A delta is then applied to all the hue values, and then remapped back to RGB colorspace.
:: Tensor v'1 Float | images: Images to adjust. At least 3-D. |
-> Tensor v'2 Float | scale: A float scale to add to the saturation. |
-> Tensor Build Float | output: The saturation-adjusted image or images. |
Adjust the saturation of one or more images.
images
is a tensor of at least 3 dimensions. The last dimension is
interpreted as channels, and must be three.
The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A scale is then applied to all the saturation values, and then remapped back to RGB colorspace.
:: OneOf `[Int32, Int64]` tidx | |
=> Tensor v'1 Bool | input: The tensor to reduce. |
-> Tensor v'2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Build Bool | output: The reduced tensor. |
Computes the "logical and" of elements across dimensions of a tensor.
Reduces input
along the dimensions given in reduction_indices
. Unless
keep_dims
is true, the rank of the tensor is reduced by 1 for each entry in
reduction_indices
. If keep_dims
is true, the reduced dimensions are
retained with length 1.
:: Int64 | num_sampled: Number of candidates to produce per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Generates labels for candidate sampling with a learned unigram distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
:: OpParams | |
-> Int64 | num_sampled: Number of candidates to produce per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
:: OneOf `[Int32, Int64]` tidx | |
=> Tensor v'1 Bool | input: The tensor to reduce. |
-> Tensor v'2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Build Bool | output: The reduced tensor. |
Computes the "logical or" of elements across dimensions of a tensor.
Reduces input
along the dimensions given in reduction_indices
. Unless
keep_dims
is true, the rank of the tensor is reduced by 1 for each entry in
reduction_indices
. If keep_dims
is true, the reduced dimensions are
retained with length 1.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the adadelta scheme.
accum = rho() * accum + (1 - rho()) * grad.square(); update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; update_accum = rho() * update_accum + (1 - rho()) * update.square(); var -= update;
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the adagrad scheme.
accum += grad * grad var -= lr * grad * (1 / sqrt(accum))
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | gradient_accumulator: Should be from a Variable(). |
-> Tensor Ref t | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 Int64 | global_step: Training step number. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the proximal adagrad scheme.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | gradient_accumulator: Should be from a Variable(). |
-> Tensor Ref t | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 Int64 | global_step: Training step number. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | m: Should be from a Variable(). |
-> Tensor Ref t | v: Should be from a Variable(). |
-> Tensor v'4 t | beta1_power: Must be a scalar. |
-> Tensor v'5 t | beta2_power: Must be a scalar. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | beta1: Momentum factor. Must be a scalar. |
-> Tensor v'8 t | beta2: Momentum factor. Must be a scalar. |
-> Tensor v'9 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'10 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the Adam algorithm.
lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | m: Should be from a Variable(). |
-> Tensor Ref t | v: Should be from a Variable(). |
-> Tensor v'4 t | beta1_power: Must be a scalar. |
-> Tensor v'5 t | beta2_power: Must be a scalar. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | beta1: Momentum factor. Must be a scalar. |
-> Tensor v'8 t | beta2: Momentum factor. Must be a scalar. |
-> Tensor v'9 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'10 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | mg: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the centered RMSProp algorithm.
The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.
Note that in dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2 mean_grad = decay * mean_grad + (1-decay) * gradient
Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) var <- var - mom
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | mg: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 t | lr_power: Scaling factor. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the Ftrl-proximal scheme.
accum_new = accum + grad * grad linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 t | lr_power: Scaling factor. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | delta: The change. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' by subtracting alpha
* delta
from it.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | delta: The change. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | momentum: Momentum. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the momentum scheme. Set use_nesterov = True if you
want to use Nesterov momentum.
accum = accum * momentum + grad var -= lr * accum
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | momentum: Momentum. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | delta: The change. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' as FOBOS algorithm with fixed learning rate.
prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | delta: The change. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the RMSProp algorithm.
Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | input |
-> Tensor v'2 tidx | dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0. |
-> Tensor Build Int64 | output |
Returns the index with the largest value across dimensions of a tensor.
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor v'2 tidx | dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0. |
-> Tensor Build Int64 | output |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | input |
-> Tensor v'2 tidx | dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0. |
-> Tensor Build Int64 | output |
Returns the index with the smallest value across dimensions of a tensor.
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor v'2 tidx | dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0. |
-> Tensor Build Int64 | output |
:: OneOf `[Complex Float, Bool, Int32, Int64, Int8, Double, Float]` t | |
=> Tensor v'1 t | input |
-> Tensor Build ByteString | output |
Converts each entry in the given tensor to strings. Supports many numeric
types and boolean.
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes asin of x element-wise.
:: (MonadBuild m', TensorTypes t) | |
=> Tensor v'1 Bool | condition: The condition to evaluate. |
-> TensorList v'2 t | data: The tensors to print out when condition is false. |
-> m' ControlNode |
Asserts that the given condition is true.
If condition
evaluates to false, print the list of tensors in `data`.
summarize
determines how many entries of the tensors to print.
:: (MonadBuild m', TensorTypes t) | |
=> OpParams | |
-> Tensor v'1 Bool | condition: The condition to evaluate. |
-> TensorList v'2 t | data: The tensors to print out when condition is false. |
-> m' ControlNode |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 t | value: The value to be assigned to the variable. |
-> m' (Tensor Ref t) | output_ref: = Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been reset. |
Update ref
by assigning value
to it.
This operation outputs "ref" after the assignment is done. This makes it easier to chain operations that need to use the reset value.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 t | value: The value to be assigned to the variable. |
-> m' (Tensor Ref t) | output_ref: = Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been reset. |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 t | value: The value to be added to the variable. |
-> m' (Tensor Ref t) | output_ref: = Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated. |
Update ref
by adding value
to it.
This operation outputs "ref" after the update is done. This makes it easier to chain operations that need to use the reset value.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 t | value: The value to be added to the variable. |
-> m' (Tensor Ref t) | output_ref: = Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated. |
:: (MonadBuild m', TensorType dtype) | |
=> ResourceHandle | resource: handle to the resource in which to store the variable. |
-> Tensor v'2 dtype | value: the value by which the variable will be incremented. |
-> m' ControlNode |
Adds a value to the current value of a variable.
Any ReadVariableOp which depends directly or indirectly on this assign is guaranteed to see the incremented value or a subsequent newer one.
Outputs the incremented value, which can be used to totally order the increments to this variable.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> ResourceHandle | resource: handle to the resource in which to store the variable. |
-> Tensor v'2 dtype | value: the value by which the variable will be incremented. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 t | value: The value to be subtracted from the variable. |
-> m' (Tensor Ref t) | output_ref: = Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated. |
Update ref
by subtracting value
from it.
This operation outputs "ref" after the update is done. This makes it easier to chain operations that need to use the reset value.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 t | value: The value to be subtracted from the variable. |
-> m' (Tensor Ref t) | output_ref: = Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated. |
:: (MonadBuild m', TensorType dtype) | |
=> ResourceHandle | resource: handle to the resource in which to store the variable. |
-> Tensor v'2 dtype | value: the value to set the new tensor to use. |
-> m' ControlNode |
Assigns a new value to a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to return this value or a subsequent newer value of the variable.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> ResourceHandle | resource: handle to the resource in which to store the variable. |
-> Tensor v'2 dtype | value: the value to set the new tensor to use. |
-> m' ControlNode |
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes atan of x element-wise.
:: Float | sample_rate: The sample rate of the signal in hertz. |
-> Tensor v'1 ByteString | tag: Scalar. Used to build the |
-> Tensor v'2 Float | tensor: 2-D of shape `[batch_size, frames]`. |
-> Tensor Build ByteString | summary: Scalar. Serialized |
Outputs a Summary
protocol buffer with audio.
The summary has up to max_outputs
summary values containing audio. The
audio is built from tensor
which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate
.
The tag
argument is a scalar Tensor
of type string
. It is used to
build the tag
of the summary values:
- If
max_outputs
is 1, the summary value tag is '*tag*/audio'. - If
max_outputs
is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
:: OpParams | |
-> Float | sample_rate: The sample rate of the signal in hertz. |
-> Tensor v'1 ByteString | tag: Scalar. Used to build the |
-> Tensor v'2 Float | tensor: 2-D of shape `[batch_size, frames]`. |
-> Tensor Build ByteString | summary: Scalar. Serialized |
:: Tensor v'1 ByteString | tag: Scalar. Used to build the |
-> Tensor v'2 Float | tensor: 2-D of shape `[batch_size, frames]`. |
-> Tensor v'3 Float | sample_rate: The sample rate of the signal in hertz. |
-> Tensor Build ByteString | summary: Scalar. Serialized |
Outputs a Summary
protocol buffer with audio.
The summary has up to max_outputs
summary values containing audio. The
audio is built from tensor
which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate
.
The tag
argument is a scalar Tensor
of type string
. It is used to
build the tag
of the summary values:
- If
max_outputs
is 1, the summary value tag is '*tag*/audio'. - If
max_outputs
is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
:: OpParams | |
-> Tensor v'1 ByteString | tag: Scalar. Used to build the |
-> Tensor v'2 Float | tensor: 2-D of shape `[batch_size, frames]`. |
-> Tensor v'3 Float | sample_rate: The sample rate of the signal in hertz. |
-> Tensor Build ByteString | summary: Scalar. Serialized |
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | value: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor Build t | output: The average pooled output tensor. |
Performs average pooling on the input.
Each entry in output
is the mean of the corresponding size ksize
window in value
.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. |
-> Tensor Build t | output: The average pooled output tensor. |
Performs 3D average pooling on the input.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int32 | orig_input_shape: The original input dimensions. |
-> Tensor v'2 t | grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. |
-> Tensor Build t | output: The backprop for input. |
Computes gradients of average pooling function.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int32 | orig_input_shape: The original input dimensions. |
-> Tensor v'2 t | grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. |
-> Tensor Build t | output: The backprop for input. |
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 Int32 | orig_input_shape: 1-D. Shape of the original input to |
-> Tensor v'2 t | grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
the output of |
-> Tensor Build t | output: 4-D. Gradients w.r.t. the input of |
Computes gradients of the average pooling function.
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int32 | orig_input_shape: 1-D. Shape of the original input to |
-> Tensor v'2 t | grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
the output of |
-> Tensor Build t | output: 4-D. Gradients w.r.t. the input of |
:: MonadBuild m' | |
=> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the barrier. |
Defines a barrier that persists across different graph executions.
A barrier represents a key-value map, where each key is a string, and each value is a tuple of tensors.
At runtime, the barrier contains complete
and incomplete
elements. A complete element has defined tensors for all components of
its value tuple, and may be accessed using BarrierTakeMany. An
incomplete element has some undefined components in its value tuple,
and may be updated using BarrierInsertMany.
:: MonadBuild m' | |
=> OpParams | |
-> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the barrier. |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to a barrier. |
-> m' ControlNode |
Closes the given barrier.
This operation signals that no more new elements will be inserted in the given barrier. Subsequent InsertMany that try to introduce a new key will fail. Subsequent InsertMany operations that just add missing components to already existing elements will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient completed elements remain in the barrier. Subsequent TakeMany operations that would block will fail immediately.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a barrier. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to a barrier. |
-> m' (Tensor Value Int32) | size: The number of incomplete elements (i.e. those with some of their value components not set) in the barrier. |
Computes the number of incomplete elements in the given barrier.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a barrier. |
-> m' (Tensor Value Int32) | size: The number of incomplete elements (i.e. those with some of their value components not set) in the barrier. |
:: (MonadBuild m', TensorType t) | |
=> Int64 | component_index: The component of the barrier elements that is being assigned. |
-> Tensor Ref ByteString | handle: The handle to a barrier. |
-> Tensor v'2 ByteString | keys: A one-dimensional tensor of keys, with length n. |
-> Tensor v'3 t | values: An any-dimensional tensor of values, which are associated with the respective keys. The 0th dimension must have length n. |
-> m' ControlNode |
For each key, assigns the respective value to the specified component.
If a key is not found in the barrier, this operation will create a new incomplete element. If a key is found in the barrier, and the element already has a value at component_index, this operation will fail with INVALID_ARGUMENT, and leave the barrier in an undefined state.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Int64 | component_index: The component of the barrier elements that is being assigned. |
-> Tensor Ref ByteString | handle: The handle to a barrier. |
-> Tensor v'2 ByteString | keys: A one-dimensional tensor of keys, with length n. |
-> Tensor v'3 t | values: An any-dimensional tensor of values, which are associated with the respective keys. The 0th dimension must have length n. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to a barrier. |
-> m' (Tensor Value Int32) | size: The number of complete elements (i.e. those with all of their value components set) in the barrier. |
Computes the number of complete elements in the given barrier.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a barrier. |
-> m' (Tensor Value Int32) | size: The number of complete elements (i.e. those with all of their value components set) in the barrier. |
:: (MonadBuild m', TensorTypes component_types) | |
=> Tensor Ref ByteString | handle: The handle to a barrier. |
-> Tensor v'2 Int32 | num_elements: A single-element tensor containing the number of elements to take. |
-> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types) | (indices, keys, values)
|
Takes the given number of completed elements from a barrier.
This operation concatenates completed-element component tensors along the 0th dimension to make a single component tensor.
Elements come out of the barrier when they are complete, and in the order in which they were placed into the barrier. The indices output provides information about the batch in which each element was originally inserted into the barrier.
:: (MonadBuild m', TensorTypes component_types) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a barrier. |
-> Tensor v'2 Int32 | num_elements: A single-element tensor containing the number of elements to take. |
-> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types) | (indices, keys, values)
|
:: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t | |
=> Tensor v'1 t | x: 3-D or higher with shape `[..., r_x, c_x]`. |
-> Tensor v'2 t | y: 3-D or higher with shape `[..., r_y, c_y]`. |
-> Tensor Build t | output: 3-D or higher with shape `[..., r_o, c_o]` |
Multiplies slices of two tensors in batches.
Multiplies all slices of Tensor
x
and y
(each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the adj_x
or adj_y
flag to True
, which are by default False
.
The input tensors x
and y
are 3-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.
The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:
r_o = c_x if adj_x else r_x c_o = r_y if adj_y else c_y
It is computed as:
output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
:: TensorType t | |
=> Tensor v'1 t | diagonal |
-> Tensor Build t | output |
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | diagonal |
-> Tensor Build t | output |
:: TensorType t | |
=> Tensor v'1 t | input |
-> Tensor Build t | diagonal |
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor Build t | diagonal |
:: TensorType t | |
=> Tensor v'1 t | input |
-> Tensor v'2 t | diagonal |
-> Tensor Build t | output |
batchNormWithGlobalNormalization
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Bool | scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma. |
-> Float | variance_epsilon: A small float number to avoid dividing by 0. |
-> Tensor v'1 t | t: A 4D input Tensor. |
-> Tensor v'2 t | m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'3 t | v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'4 t | beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. |
-> Tensor v'5 t | gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. |
-> Tensor Build t | result |
Batch normalization.
This op is deprecated. Prefer `tf.nn.batch_normalization`.
batchNormWithGlobalNormalization'
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Bool | scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma. |
-> Float | variance_epsilon: A small float number to avoid dividing by 0. |
-> Tensor v'1 t | t: A 4D input Tensor. |
-> Tensor v'2 t | m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'3 t | v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'4 t | beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. |
-> Tensor v'5 t | gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. |
-> Tensor Build t | result |
batchNormWithGlobalNormalizationGrad
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Bool | scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma. |
-> Float | variance_epsilon: A small float number to avoid dividing by 0. |
-> Tensor v'1 t | t: A 4D input Tensor. |
-> Tensor v'2 t | m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'3 t | v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'4 t | gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor. |
-> Tensor v'5 t | backprop: 4D backprop Tensor. |
-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) | (dx, dm, dv, db, dg)
|
Gradients for batch normalization.
This op is deprecated. See `tf.nn.batch_normalization`.
batchNormWithGlobalNormalizationGrad'
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Bool | scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma. |
-> Float | variance_epsilon: A small float number to avoid dividing by 0. |
-> Tensor v'1 t | t: A 4D input Tensor. |
-> Tensor v'2 t | m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'3 t | v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'4 t | gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor. |
-> Tensor v'5 t | backprop: 4D backprop Tensor. |
-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) | (dx, dm, dv, db, dg)
|
:: (TensorType t, OneOf `[Int32, Int64]` tidx) | |
=> Int64 | block_size |
-> Tensor v'1 t | input: 4-D tensor with shape `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. Note that the batch size of the input tensor must be divisible by `block_size * block_size`. |
-> Tensor v'2 tidx | crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial dimensions as follows: crops = [[crop_top, crop_bottom], [crop_left, crop_right]] |
-> Tensor Build t | output: 4-D with shape `[batch, height, width, depth]`, where: height = height_pad - crop_top - crop_bottom width = width_pad - crop_left - crop_right The attr Some examples:
```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ``` The output tensor has shape `[1, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [2]], [[3], [4]]]] ```
```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ``` The output tensor has shape `[1, 2, 2, 3]` and value: ```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```
```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` The output tensor has shape `[1, 4, 4, 1]` and value: ```prettyprint x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] ```
```prettyprint x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] ``` The output tensor has shape `[2, 2, 4, 1]` and value: ```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` |
BatchToSpace for 4-D tensors of type T.
This is a legacy version of the more general BatchToSpaceND.
Rearranges (permutes) data from batch into blocks of spatial data, followed by
cropping. This is the reverse transformation of SpaceToBatch. More specifically,
this op outputs a copy of the input tensor where values from the batch
dimension are moved in spatial blocks to the height
and width
dimensions,
followed by cropping along the height
and width
dimensions.
:: (TensorType t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Int64 | block_size |
-> Tensor v'1 t | input: 4-D tensor with shape `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. Note that the batch size of the input tensor must be divisible by `block_size * block_size`. |
-> Tensor v'2 tidx | crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial dimensions as follows: crops = [[crop_top, crop_bottom], [crop_left, crop_right]] |
-> Tensor Build t | output: 4-D with shape `[batch, height, width, depth]`, where: height = height_pad - crop_top - crop_bottom width = width_pad - crop_left - crop_right The attr Some examples:
```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ``` The output tensor has shape `[1, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [2]], [[3], [4]]]] ```
```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ``` The output tensor has shape `[1, 2, 2, 3]` and value: ```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```
```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` The output tensor has shape `[1, 4, 4, 1]` and value: ```prettyprint x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] ```
```prettyprint x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] ``` The output tensor has shape `[2, 2, 4, 1]` and value: ```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` |
:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) | |
=> Tensor v'1 t | input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has M dimensions. |
-> Tensor v'2 tblock_shape | block_shape: 1-D with shape `[M]`, all values must be >= 1. |
-> Tensor v'3 tcrops | crops: 2-D with shape `[M, 2]`, all values must be >= 0.
`crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
dimension `i + 1`, which corresponds to spatial dimension This operation is equivalent to the following steps:
1. Reshape `input` to `reshaped` of shape: [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape), input_shape[1], ..., input_shape[N-1]]
2. Permute dimensions of `reshaped` to produce `permuted` of shape [batch / prod(block_shape), input_shape[1], block_shape[0], ..., input_shape[M], block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]
3. Reshape `permuted` to produce `reshaped_permuted` of shape [batch / prod(block_shape), input_shape[1] * block_shape[0], ..., input_shape[M] * block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]
4. Crop the start and end of dimensions `[1, ..., M]` of `reshaped_permuted` according to `crops` to produce the output of shape: [batch / prod(block_shape), input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1], ..., input_shape[N-1]] Some examples:
```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ``` The output tensor has shape `[1, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [2]], [[3], [4]]]] ```
```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ``` The output tensor has shape `[1, 2, 2, 3]` and value: ```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```
```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` The output tensor has shape `[1, 4, 4, 1]` and value: ```prettyprint x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] ```
```prettyprint x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] ``` The output tensor has shape `[2, 2, 4, 1]` and value: ```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` |
-> Tensor Build t | output |
BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
`block_shape + [batch]`, interleaves these blocks back into the grid defined by
the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
the input. The spatial dimensions of this intermediate result are then
optionally cropped according to crops
to produce the output. This is the
reverse of SpaceToBatch. See below for a precise description.
:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) | |
=> OpParams | |
-> Tensor v'1 t | input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has M dimensions. |
-> Tensor v'2 tblock_shape | block_shape: 1-D with shape `[M]`, all values must be >= 1. |
-> Tensor v'3 tcrops | crops: 2-D with shape `[M, 2]`, all values must be >= 0.
`crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
dimension `i + 1`, which corresponds to spatial dimension `i`. This operation is equivalent to the following steps:
1. Reshape `input` to `reshaped` of shape: [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape), input_shape[1], ..., input_shape[N-1]]
2. Permute dimensions of `reshaped` to produce `permuted` of shape [batch / prod(block_shape), input_shape[1], block_shape[0], ..., input_shape[M], block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]
3. Reshape `permuted` to produce `reshaped_permuted` of shape [batch / prod(block_shape), input_shape[1] * block_shape[0], ..., input_shape[M] * block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]
4. Crop the start and end of dimensions `[1, ..., M]` of `reshaped_permuted` according to `crops` to produce the output of shape: [batch / prod(block_shape), input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1], ..., input_shape[N-1]] Some examples:
```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ``` The output tensor has shape `[1, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [2]], [[3], [4]]]] ```
```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ``` The output tensor has shape `[1, 2, 2, 3]` and value: ```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```
```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` The output tensor has shape `[1, 4, 4, 1]` and value: ```prettyprint x = [[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] ```
```prettyprint x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] ``` The output tensor has shape `[2, 2, 4, 1]` and value: ```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` |
-> Tensor Build t | output |
Compute the regularized incomplete beta integral \(I_x(a, b)\).
The regularized incomplete beta integral is defined as:
``` I_x(a, b) = \frac{B(x; a, b)}{B(a, b)} ``` where
``` B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt ```
is the incomplete beta function and \(B(a, b)\) is the *complete* beta function.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | out_backprop: Any number of dimensions. |
-> Tensor Build t | output: 1-D with size the feature dimension of `out_backprop`. |
The backward operation for BiasAdd on the "bias" tensor.
It accumulates all the values from out_backprop into the feature dimension. For NHWC data format, the feature dimension is the last. For NCHW data format, the feature dimension is the third-to-last.
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` type') | |
=> Tensor v'1 t | input |
-> Tensor Build type' | output |
Bitcasts a tensor from one type to another without copying data.
Given a tensor input
, this operation returns a tensor that has the same buffer
data as input
with datatype `type`.
If the input datatype T
is larger than the output datatype `type` then the
shape changes from [...] to [..., sizeof(T
)/sizeof(`type`)].
If T
is smaller than `type`, the operator requires that the rightmost
dimension be equal to sizeof(`type`)/sizeof(T
). The shape then goes from
[..., sizeof(`type`)/sizeof(T
)] to [...].
*NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results.
Return the shape of s0 op s1 with broadcast.
Given s0
and s1
, tensors that represent shapes, compute r0
, the
broadcasted shape. s0
, s1
and r0
are all integer vectors.
:: OneOf `[Int32, Int64]` t | |
=> Tensor v'1 t | s0 |
-> Tensor v'2 t | s1 |
-> (Tensor Build t, Tensor Build t) | (r0, r1)
|
Return the reduction indices for computing gradients of s0 op s1 with broadcast.
This is typically used by gradient computations for a broadcasting operation.
:: Int64 | beam_width: A scalar >= 0 (beam search beam width). |
-> Int64 | top_paths: A scalar >= 0, <= beam_width (controls output size). |
-> Tensor v'1 Float | inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. |
-> Tensor v'2 Int32 | sequence_length: A vector containing sequence lengths, size `(batch)`. |
-> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) | (decoded_indices, decoded_values, decoded_shape, log_probability)
|
Performs beam search decoding on the logits given in input.
A note about the attribute merge_repeated: For the beam search decoder, this means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the top path is "A B B B B", "A B" is returned if merge_repeated = True but "A B B B B" is returned if merge_repeated = False.
:: OpParams | |
-> Int64 | beam_width: A scalar >= 0 (beam search beam width). |
-> Int64 | top_paths: A scalar >= 0, <= beam_width (controls output size). |
-> Tensor v'1 Float | inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. |
-> Tensor v'2 Int32 | sequence_length: A vector containing sequence lengths, size `(batch)`. |
-> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) | (decoded_indices, decoded_values, decoded_shape, log_probability)
|
:: Tensor v'1 Float | inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. |
-> Tensor v'2 Int32 | sequence_length: A vector containing sequence lengths, size `(batch_size)`. |
-> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) | (decoded_indices, decoded_values, decoded_shape, log_probability)
|
Performs greedy decoding on the logits given in inputs.
A note about the attribute merge_repeated: if enabled, when
consecutive logits' maximum indices are the same, only the first of
these is emitted. Labeling the blank *
, the sequence "A B B * B B"
becomes "A B" if merge_repeated = True and "A B B B B" if
merge_repeated = False.
Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank, index `(num_classes - 1)`, no new element is emitted.
:: OpParams | |
-> Tensor v'1 Float | inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. |
-> Tensor v'2 Int32 | sequence_length: A vector containing sequence lengths, size `(batch_size)`. |
-> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) | (decoded_indices, decoded_values, decoded_shape, log_probability)
|
:: Tensor v'1 Float | inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. |
-> Tensor v'2 Int64 | labels_indices: The indices of a `SparseTensor<int32, 2>`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`. |
-> Tensor v'3 Int32 | labels_values: The values (labels) associated with the given batch and time. |
-> Tensor v'4 Int32 | sequence_length: A vector containing sequence lengths (batch). |
-> (Tensor Build Float, Tensor Build Float) | (loss, gradient)
|
Calculates the CTC Loss (log probability) for each batch entry. Also calculates
the gradient. This class performs the softmax operation for you, so inputs should be e.g. linear projections of outputs by an LSTM.
:: OpParams | |
-> Tensor v'1 Float | inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. |
-> Tensor v'2 Int64 | labels_indices: The indices of a `SparseTensor<int32, 2>`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`. |
-> Tensor v'3 Int32 | labels_values: The values (labels) associated with the given batch and time. |
-> Tensor v'4 Int32 | sequence_length: A vector containing sequence lengths (batch). |
-> (Tensor Build Float, Tensor Build Float) | (loss, gradient)
|
:: (TensorType srcT, TensorType dstT) | |
=> Tensor v'1 srcT | x |
-> Tensor Build dstT | y |
Cast x of type SrcT to y of DstT.
:: (TensorType srcT, TensorType dstT) | |
=> OpParams | |
-> Tensor v'1 srcT | x |
-> Tensor Build dstT | y |
Returns element-wise smallest integer not less than x.
Checks a tensor for NaN and Inf values.
When run, reports an InvalidArgument
error if tensor
has any values
that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor
as-is.
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | input: Shape is `[..., M, M]`. |
-> Tensor Build t | output: Shape is `[..., M, M]`. |
Computes the Cholesky decomposition of one or more square matrices.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix Cholesky decomposition above. The output is a tensor of the same shape as the input containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor. |
-> Tensor v'2 t | grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor. |
-> Tensor Build t | output: Symmetrized version of df/dA . Shape is `[..., M, M]` |
Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
For an explanation see "Differentiation of the Cholesky algorithm" by Iain Murray http://arxiv.org/abs/1602.07527.
:: OneOf `[Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor. |
-> Tensor v'2 t | grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor. |
-> Tensor Build t | output: Symmetrized version of df/dA . Shape is `[..., M, M]` |
:: (OneOf `[Double, Float]` t, OneOf `[Complex Double, Complex Float]` tout) | |
=> Tensor v'1 t | real |
-> Tensor v'2 t | imag |
-> Tensor Build tout | out |
Converts two real numbers to a complex number.
Given a tensor real
representing the real part of a complex number, and a
tensor imag
representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \(a + bj\), where
*a* represents the real
part and *b* represents the imag
part.
The input tensors real
and imag
must have the same shape.
For example:
```
# tensor real
is [2.25, 3.25]
# tensor imag
is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```
:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) | |
=> Tensor v'1 t | x |
-> Tensor Build tout | y |
Computes the complex absolute value of a tensor.
Given a tensor x
of complex numbers, this operation returns a tensor of type
float
or double
that is the absolute value of each element in x
. All
elements in x
must be complex numbers of the form \(a + bj\). The absolute
value is computed as \(\sqrt{a^2 + b^2}\).
:: Int64 | num_true: Number of true labels per context. |
-> Tensor v'1 Int64 | true_classes: The true_classes output of UnpackSparseLabels. |
-> Tensor v'2 Int64 | sampled_candidates: The sampled_candidates output of CandidateSampler. |
-> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) | (indices, ids, weights)
|
Computes the ids of the positions in sampled_candidates that match true_labels.
When doing log-odds NCE, the result of this op should be passed through a
SparseToDense op, then added to the logits of the sampled candidates. This has
the effect of removing
the sampled labels that match the true labels by
making the classifier sure that they are sampled labels.
:: OpParams | |
-> Int64 | num_true: Number of true labels per context. |
-> Tensor v'1 Int64 | true_classes: The true_classes output of UnpackSparseLabels. |
-> Tensor v'2 Int64 | sampled_candidates: The sampled_candidates output of CandidateSampler. |
-> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) | (indices, ids, weights)
|
:: TensorType t | |
=> Tensor v'1 Int32 | concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)). |
-> [Tensor v'2 t] | values: The |
-> Tensor Build t | output: A |
Concatenates tensors along one dimension.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 Int32 | concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)). |
-> [Tensor v'2 t] | values: The |
-> Tensor Build t | output: A |
:: Tensor v'1 Int32 | concat_dim: The dimension along which to concatenate. |
-> [Tensor v'2 Int32] | shape: The |
-> [Tensor Build Int32] | offset: The This is typically used by gradient computations for a concat operation. |
Computes offsets of concat inputs within its output.
For example:
```prettyprint
# x
is [2, 2, 7]
# y
is [2, 3, 7]
# z
is [2, 5, 7]
concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
```
:: OpParams | |
-> Tensor v'1 Int32 | concat_dim: The dimension along which to concatenate. |
-> [Tensor v'2 Int32] | shape: The |
-> [Tensor Build Int32] | offset: The This is typically used by gradient computations for a concat operation. |
:: (TensorType t, OneOf `[Int32, Int64]` tidx) | |
=> [Tensor v'1 t] | values: List of |
-> Tensor v'2 tidx | axis: 0-D. The dimension along which to concatenate. Must be in the range [-rank(values), rank(values)). |
-> Tensor Build t | output: A |
Concatenates tensors along one dimension.
:: (TensorType t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> [Tensor v'1 t] | values: List of |
-> Tensor v'2 tidx | axis: 0-D. The dimension along which to concatenate. Must be in the range [-rank(values), rank(values)). |
-> Tensor Build t | output: A |
:: MonadBuild m' | |
=> DataType | dtype: The type of the value being accumulated. |
-> Shape | shape: The shape of the values, can be [], in which case shape is unknown. |
-> m' (Tensor Ref ByteString) | handle: The handle to the accumulator. |
A conditional accumulator for aggregating gradients. The accumulator accepts
gradients marked with local_step greater than or equal to the most recent global_step known to the accumulator. The average can be extracted from the accumulator, provided sufficient gradients have been accumulated. Extracting the average automatically resets the aggregate to 0, and increments the global_step recorded by the accumulator.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | dtype: The type of the value being accumulated. |
-> Shape | shape: The shape of the values, can be [], in which case shape is unknown. |
-> m' (Tensor Ref ByteString) | handle: The handle to the accumulator. |
Returns the complex conjugate of a complex number.
Given a tensor input
of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in input
. The
complex numbers in input
must be of the form \(a + bj\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \(a - bj\).
For example:
```
# tensor input
is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
```
:: TensorType dtype | |
=> OpParams | |
-> Tensor Build dtype | output |
controlTrigger :: forall m'. MonadBuild m' => m' ControlNode
Does nothing. Serves as a control trigger for scheduling.
Only useful as a placeholder for control edges.
controlTrigger' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | input |
-> Tensor v'2 t | filter |
-> Tensor Build t | output |
Computes a 2-D convolution given 4-D input
and filter
tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following:
- Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`.
- Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`.
- For each patch, right-multiplies the filter matrix and the image patch vector.
In detail, with the default NHWC format,
output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | filter_sizes: An integer vector representing the tensor shape of |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
the |
Computes the gradients of convolution with respect to the filter.
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | filter_sizes: An integer vector representing the tensor shape of |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
the |
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 Int32 | input_sizes: An integer vector representing the shape of |
-> Tensor v'2 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution. |
Computes the gradients of convolution with respect to the input.
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int32 | input_sizes: An integer vector representing the shape of |
-> Tensor v'2 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution. |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: Shape `[batch, in_depth, in_height, in_width, in_channels]`. |
-> Tensor v'2 t | filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
out_channels]`. |
-> Tensor Build t | output |
Computes a 3-D convolution given 5-D input
and filter
tensors.
In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product.
Our Conv3D implements a form of cross-correlation.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: Shape `[batch, in_depth, in_height, in_width, in_channels]`. |
-> Tensor v'2 t | filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
out_channels]`. |
-> Tensor Build t | output |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, in_channels]`. |
-> Tensor v'2 t | filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
|
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
Computes the gradients of 3-D convolution with respect to the filter.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, in_channels]`. |
-> Tensor v'2 t | filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
|
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, in_channels]`. |
-> Tensor v'2 Int32 | filter_sizes: An integer vector representing the tensor shape of |
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
Computes the gradients of 3-D convolution with respect to the filter.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, in_channels]`. |
-> Tensor v'2 Int32 | filter_sizes: An integer vector representing the tensor shape of |
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, in_channels]`. |
-> Tensor v'2 t | filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
|
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
Computes the gradients of 3-D convolution with respect to the input.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, in_channels]`. |
-> Tensor v'2 t | filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
|
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int32 | input_sizes: An integer vector representing the tensor shape of |
-> Tensor v'2 t | filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
|
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
Computes the gradients of 3-D convolution with respect to the input.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int32 | input_sizes: An integer vector representing the tensor shape of |
-> Tensor v'2 t | filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
|
-> Tensor v'3 t | out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`. |
-> Tensor Build t | output |
:: TensorType t | |
=> Tensor v'1 t | input: Input tensor. |
-> Tensor Build t | output: Output tensor, deep-copied from input. |
Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input: Input tensor. |
-> Tensor Build t | output: Output tensor, deep-copied from input. |
:: TensorType t | |
=> Tensor v'1 t | input: Input tensor. |
-> Tensor Build t | output: Output tensor, deep-copied from input. |
Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input: Input tensor. |
-> Tensor Build t | output: Output tensor, deep-copied from input. |
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes cos of x element-wise.
:: (MonadBuild m', OneOf `[Int32, Int64]` t) | |
=> Int64 | limit: If incrementing ref would bring it above limit, instead generates an
|
-> Tensor Ref t | ref: Should be from a scalar |
-> m' (Tensor Value t) | output: A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct. |
Increments ref
until it reaches limit
.
:: (MonadBuild m', OneOf `[Int32, Int64]` t) | |
=> OpParams | |
-> Int64 | limit: If incrementing ref would bring it above limit, instead generates an
|
-> Tensor Ref t | ref: Should be from a scalar |
-> m' (Tensor Value t) | output: A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both |
-> Tensor v'2 Float | boxes: A 2-D tensor of shape `[num_boxes, 4]`. The |
-> Tensor v'3 Int32 | box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the |
-> Tensor v'4 Int32 | crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the image
content is not preserved. Both |
-> Tensor Build Float | crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. |
Extracts crops from the input image tensor and bilinearly resizes them (possibly
with aspect ratio change) to a common output size specified by crop_size
. This
is more general than the crop_to_bounding_box
op which extracts a fixed size
slice from the input image and does not allow resizing or aspect ratio change.
Returns a tensor with crops
from the input image
at positions defined at the
bounding box locations in boxes
. The cropped boxes are all resized (with
bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both |
-> Tensor v'2 Float | boxes: A 2-D tensor of shape `[num_boxes, 4]`. The |
-> Tensor v'3 Int32 | box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the |
-> Tensor v'4 Int32 | crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the image
content is not preserved. Both |
-> Tensor Build Float | crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Float | grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. |
-> Tensor v'2 t | image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both |
-> Tensor v'3 Float | boxes: A 2-D tensor of shape `[num_boxes, 4]`. The |
-> Tensor v'4 Int32 | box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the |
-> Tensor Build Float | output: A 2-D tensor of shape `[num_boxes, 4]`. |
Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Float | grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. |
-> Tensor v'2 t | image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both |
-> Tensor v'3 Float | boxes: A 2-D tensor of shape `[num_boxes, 4]`. The |
-> Tensor v'4 Int32 | box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the |
-> Tensor Build Float | output: A 2-D tensor of shape `[num_boxes, 4]`. |
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 Float | grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. |
-> Tensor v'2 Float | boxes: A 2-D tensor of shape `[num_boxes, 4]`. The |
-> Tensor v'3 Int32 | box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the |
-> Tensor v'4 Int32 | image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
containing the original image size. Both |
-> Tensor Build t | output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. |
Computes the gradient of the crop_and_resize op wrt the input image tensor.
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Float | grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. |
-> Tensor v'2 Float | boxes: A 2-D tensor of shape `[num_boxes, 4]`. The |
-> Tensor v'3 Int32 | box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the |
-> Tensor v'4 Int32 | image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
containing the original image size. Both |
-> Tensor Build t | output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | a: A tensor containing 3-element vectors. |
-> Tensor v'2 t | b: Another tensor, of same type and shape as |
-> Tensor Build t | product: Pairwise cross product of the vectors in |
Compute the pairwise cross product.
a
and b
must be the same shape; they can either be simple 3-element vectors,
or any shape where the innermost dimension is 3. In the latter case, each pair
of corresponding 3-element vectors is cross-multiplied independently.
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | x |
-> Tensor v'2 tidx | axis |
-> Tensor Build t | out |
Compute the cumulative product of the tensor x
along axis
.
By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output: ```prettyprint tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c] ```
By setting the exclusive
kwarg to True
, an exclusive cumprod is
performed instead:
```prettyprint
tf.cumprod([a, b, c], exclusive=True) ==> [0, a, a * b]
```
By setting the reverse
kwarg to True
, the cumprod is performed in the
opposite direction:
```prettyprint
tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The reverse
and exclusive
kwargs can also be combined:
```prettyprint
tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 0]
```
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | x |
-> Tensor v'2 tidx | axis |
-> Tensor Build t | out |
Compute the cumulative sum of the tensor x
along axis
.
By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output: ```prettyprint tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c] ```
By setting the exclusive
kwarg to True
, an exclusive cumsum is
performed instead:
```prettyprint
tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
```
By setting the reverse
kwarg to True
, the cumsum is performed in the
opposite direction:
```prettyprint
tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The reverse
and exclusive
kwargs can also be combined:
```prettyprint
tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
```
:: TensorType t | |
=> Tensor v'1 t | input: Input tensor, non-Reference type. |
-> Tensor Build t | output: Output tensor that equals the input tensor. |
Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input: Input tensor, non-Reference type. |
-> Tensor Build t | output: Output tensor that equals the input tensor. |
:: TensorType t | |
=> Tensor v'1 t | input: Input tensor, non-Reference type. |
-> Tensor Build Int64 | output: An integer output tensor that is the number of NaNs in the input. |
Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
:: TensorType t | |
=> Tensor v'1 t | input: Input tensor, non-Reference type, float or double. |
-> Tensor Build Double | output: A double tensor of shape [12], the elements of which are: [0]: is initialized (1.0) or not (0.0). [1]: total number of elements [2]: -inf count [3]: negative element count (excluding -inf) [4]: zero element count [5]: positive element count (excluding +inf) [6]: +inf element count [7]: NaN element count Output elements [1:8] are all zero, if the tensor is uninitialized. [8]: minimum of all non-inf and non-NaN elements. If uninitialized or no such element exists: +inf. [9]: maximum of all non-inf and non-NaN elements. If uninitialized or no such element exists: -inf. [10]: mean of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN. [11]: variance of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN. |
Debug Numeric Summary Op.
Provide a basic summary of numeric value types, range and distribution.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input: Input tensor, non-Reference type, float or double. |
-> Tensor Build Double | output: A double tensor of shape [12], the elements of which are: [0]: is initialized (1.0) or not (0.0). [1]: total number of elements [2]: -inf count [3]: negative element count (excluding -inf) [4]: zero element count [5]: positive element count (excluding +inf) [6]: +inf element count [7]: NaN element count Output elements [1:8] are all zero, if the tensor is uninitialized. [8]: minimum of all non-inf and non-NaN elements. If uninitialized or no such element exists: +inf. [9]: maximum of all non-inf and non-NaN elements. If uninitialized or no such element exists: -inf. [10]: mean of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN. [11]: variance of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN. |
:: Tensor v'1 ByteString | input: Base64 strings to decode. |
-> Tensor Build ByteString | output: Decoded strings. |
Decode web-safe base64-encoded strings.
Input may or may not have padding at the end. See EncodeBase64 for padding. Web-safe means that input must use - and _ instead of + and /.
:: OpParams | |
-> Tensor v'1 ByteString | input: Base64 strings to decode. |
-> Tensor Build ByteString | output: Decoded strings. |
:: OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE | |
=> Tensor v'1 ByteString | records: Each string is a record/row in the csv and all records should have the same format. |
-> TensorList v'2 oUT_TYPE | record_defaults: One tensor per column of the input record, with either a scalar default value for that column or empty if the column is required. |
-> TensorList Build oUT_TYPE | output: Each tensor will have the same shape as records. |
Convert CSV records to tensors. Each column maps to one tensor.
RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float field.
:: OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE | |
=> OpParams | |
-> Tensor v'1 ByteString | records: Each string is a record/row in the csv and all records should have the same format. |
-> TensorList v'2 oUT_TYPE | record_defaults: One tensor per column of the input record, with either a scalar default value for that column or empty if the column is required. |
-> TensorList Build oUT_TYPE | output: Each tensor will have the same shape as records. |
:: Tensor v'1 ByteString | contents: 0-D. The GIF-encoded image. |
-> Tensor Build Word8 | image: 4-D with shape `[num_frames, height, width, 3]`. RGB order |
Decode the first frame of a GIF-encoded image to a uint8 tensor.
GIF images with frame or transparency compression are not supported; convert animated GIFs from compressed to uncompressed by:
convert $src.gif -coalesce $dst.gif
:: Tensor v'1 ByteString | json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto. |
-> Tensor Build ByteString | binary_examples: Each string is a binary Example protocol buffer corresponding
to the respective element of |
Convert JSON-encoded Example records to binary protocol buffer strings.
This op translates a tensor containing Example records, encoded using the standard JSON mapping, into a tensor containing the same records encoded as binary protocol buffers. The resulting tensor can then be fed to any of the other Example-parsing ops.
:: OpParams | |
-> Tensor v'1 ByteString | json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto. |
-> Tensor Build ByteString | binary_examples: Each string is a binary Example protocol buffer corresponding
to the respective element of |
:: Tensor v'1 ByteString | contents: 0-D. The JPEG-encoded image. |
-> Tensor Build Word8 | image: 3-D with shape `[height, width, channels]`. |
Decode a JPEG-encoded image to a uint8 tensor.
The attr channels
indicates the desired number of color channels for the
decoded image.
Accepted values are:
- 0: Use the number of channels in the JPEG-encoded image.
- 1: output a grayscale image.
- 3: output an RGB image.
If needed, the JPEG-encoded image is transformed to match the requested number of color channels.
The attr ratio
allows downscaling the image by an integer factor during
decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
downscaling the image later.
:: OneOf `[Word16, Word8]` dtype | |
=> Tensor v'1 ByteString | contents: 0-D. The PNG-encoded image. |
-> Tensor Build dtype | image: 3-D with shape `[height, width, channels]`. |
Decode a PNG-encoded image to a uint8 or uint16 tensor.
The attr channels
indicates the desired number of color channels for the
decoded image.
Accepted values are:
- 0: Use the number of channels in the PNG-encoded image.
- 1: output a grayscale image.
- 3: output an RGB image.
- 4: output an RGBA image.
If needed, the PNG-encoded image is transformed to match the requested number of color channels.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type | |
=> Tensor v'1 ByteString | bytes: All the elements must have the same length. |
-> Tensor Build out_type | output: A Tensor with one more dimension than the input |
Reinterpret the bytes of a string as a vector of numbers.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type | |
=> OpParams | |
-> Tensor v'1 ByteString | bytes: All the elements must have the same length. |
-> Tensor Build out_type | output: A Tensor with one more dimension than the input |
:: MonadBuild m' | |
=> Tensor v'1 ByteString | handle: The handle for a tensor stored in the session state. |
-> m' ControlNode |
Delete the tensor specified by its handle in the session.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor v'1 ByteString | handle: The handle for a tensor stored in the session state. |
-> m' ControlNode |
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> Tensor v'1 t | set1: |
-> Tensor v'2 t | set2: |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (result_indices, result_values, result_shape)
|
Applies set operation along last dimension of 2 Tensor
inputs.
See SetOperationOp::SetOperationFromContext for values of set_operation
.
Output result
is a SparseTensor
represented by result_indices
,
result_values
, and result_shape
. For set1
and set2
ranked n
, this
has rank n
and the same 1st `n-1` dimensions as set1
and set2
. The nth
dimension contains the result of set_operation
applied to the corresponding
`[0...n-1]` dimension of set
.
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 t | set1: |
-> Tensor v'2 t | set2: |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (result_indices, result_values, result_shape)
|
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> Tensor v'1 t | set1: |
-> Tensor v'2 Int64 | set2_indices: 2D |
-> Tensor v'3 t | set2_values: 1D |
-> Tensor v'4 Int64 | set2_shape: 1D |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (result_indices, result_values, result_shape)
|
Applies set operation along last dimension of Tensor
and SparseTensor
.
See SetOperationOp::SetOperationFromContext for values of set_operation
.
Input set2
is a SparseTensor
represented by set2_indices
, set2_values
,
and set2_shape
. For set2
ranked n
, 1st `n-1` dimensions must be the same
as set1
. Dimension n
contains values in a set, duplicates are allowed but
ignored.
If validate_indices
is True
, this op validates the order and range of set2
indices.
Output result
is a SparseTensor
represented by result_indices
,
result_values
, and result_shape
. For set1
and set2
ranked n
, this
has rank n
and the same 1st `n-1` dimensions as set1
and set2
. The nth
dimension contains the result of set_operation
applied to the corresponding
`[0...n-1]` dimension of set
.
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 t | set1: |
-> Tensor v'2 Int64 | set2_indices: 2D |
-> Tensor v'3 t | set2_values: 1D |
-> Tensor v'4 Int64 | set2_shape: 1D |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (result_indices, result_values, result_shape)
|
:: TensorType t | |
=> Int64 | block_size: The size of the spatial block, same as in Space2Depth. |
-> Tensor v'1 t | input |
-> Tensor Build t | output |
DepthToSpace for tensors of type T.
Rearranges data from depth into blocks of spatial data.
This is the reverse transformation of SpaceToDepth. More specifically,
this op outputs a copy of the input tensor where values from the depth
dimension are moved in spatial blocks to the height
and width
dimensions.
The attr block_size
indicates the input block size and how the data is moved.
- Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size`
- The width of the output tensor is `input_width * block_size`, whereas the height is `input_height * block_size`.
- The depth of the input tensor must be divisible by `block_size * block_size`.
That is, assuming the input is in the shape: `[batch, height, width, depth]`, the shape of the output will be: `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`
This operation requires that the input tensor be of rank 4, and that
block_size
be >=1 and that `block_size * block_size` be a divisor of the
input depth.
This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.
For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:
```prettyprint x = [[[[1, 2, 3, 4]]]]
```
This operation will output a tensor of shape `[1, 2, 2, 1]`:
```prettyprint [[[[1], [2]], [[3], [4]]]] ```
Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, the corresponding output will have 2x2 elements and will have a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output element shape is `[2, 2, 1]`.
For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
```prettyprint x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ```
This operation, for block size of 2, will return the following tensor of shape `[1, 2, 2, 3]`
```prettyprint [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
```
Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]] ```
the operator will return the following tensor of shape `[1 4 4 1]`:
```prettyprint x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [ [9], [10], [13], [14]], [ [11], [12], [15], [16]]]
```
Computes a 2-D depthwise convolution given 4-D input
and filter
tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
in_channels
convolutional filters of depth 1, depthwise_conv2d
applies
a different filter to each input channel (expanding from 1 channel to
channel_multiplier
channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.
for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
depthwiseConv2dNativeBackpropFilter
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | filter_sizes: An integer vector representing the tensor shape of |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
the |
Computes the gradients of depthwise convolution with respect to the filter.
depthwiseConv2dNativeBackpropFilter'
:: OneOf `[Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | filter_sizes: An integer vector representing the tensor shape of |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
the |
depthwiseConv2dNativeBackpropInput
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 Int32 | input_sizes: An integer vector representing the shape of |
-> Tensor v'2 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, depthwise_multiplier]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution. |
Computes the gradients of depthwise convolution with respect to the input.
depthwiseConv2dNativeBackpropInput'
:: OneOf `[Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int32 | input_sizes: An integer vector representing the shape of |
-> Tensor v'2 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, depthwise_multiplier]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. |
-> Tensor Build t | output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution. |
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> Tensor v'1 t | input |
-> Tensor v'2 Float | min_range: The minimum scalar value possibly produced for the input. |
-> Tensor v'3 Float | max_range: The maximum scalar value possibly produced for the input. |
-> Tensor Build Float | output |
Dequantize the input
tensor into a float Tensor.
- min_range, max_range
- are scalar floats that specify the range for
the
input
data. The mode
attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.
In MIN_COMBINED
mode, each value of the tensor will undergo the following:
``` if T == qint8, in[i] += (range(T) + 1)/ 2.0 out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) ``` here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
*MIN_COMBINED Mode Example*
If the input comes from a QuantizedRelu6, the output type is quint8 (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The min_range and max_range values are therefore 0.0 and 6.0. Dequantize on quint8 will take each value, cast to float, and multiply by 6 / 255. Note that if the quantized type is qint8, the operation will additionally add each value by 128 prior to casting.
If the mode is MIN_FIRST
, then this approach is used:
``` number_of_steps = 1 << (# of bits in T) range_adjust = number_of_steps / (number_of_steps - 1) range = (range_max - range_min) * range_adjust range_scale = range / number_of_steps const double offset_input = static_cast<double>(input) - lowest_quantized; result = range_min + ((input - numeric_limits<T>::min()) * range_scale) ```
:: TensorType dtype | |
=> Tensor v'1 ByteString | serialized_sparse: 2-D, The |
-> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64) | (sparse_indices, sparse_values, sparse_shape)
|
Deserialize and concatenate SparseTensors
from a serialized minibatch.
The input serialized_sparse
must be a string matrix of shape `[N x 3]` where
N
is the minibatch size and the rows correspond to packed outputs of
SerializeSparse
. The ranks of the original SparseTensor
objects
must all match. When the final SparseTensor
is created, it has rank one
higher than the ranks of the incoming SparseTensor
objects
(they have been concatenated along a new row dimension).
The output SparseTensor
object's shape values for all dimensions but the
first are the max across the input SparseTensor
objects' shape values
for the corresponding dimensions. Its first shape value is N
, the minibatch
size.
The input SparseTensor
objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run SparseReorder
to restore index ordering.
For example, if the serialized input is a `[2 x 3]` matrix representing two
original SparseTensor
objects:
index = [ 0] [10] [20] values = [1, 2, 3] shape = [50]
and
index = [ 2] [10] values = [4, 5] shape = [30]
then the final deserialized SparseTensor
will be:
index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50]
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref t | ref: A reference to the temporary variable tensor. |
-> m' (Tensor Value t) | value |
Destroys the temporary variable and returns its final value.
Sets output to the value of the Tensor pointed to by ref
, then destroys
the temporary variable called var_name
.
All other uses of ref
*must* have executed before this op.
This is typically achieved by chaining the ref through each assign op, or by
using control dependencies.
Outputs the final value of the tensor pointed to by ref
.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref t | ref: A reference to the temporary variable tensor. |
-> m' (Tensor Value t) | value |
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t | |
=> Tensor v'1 t | diagonal: Rank k tensor where k is at most 3. |
-> Tensor Build t | output |
Returns a diagonal tensor with a given diagonal values.
Given a diagonal
, this operation returns a tensor with the diagonal
and
everything else padded with zeros. The diagonal is computed as follows:
Assume diagonal
has dimensions [D1,..., Dk], then the output is a tensor of
rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
For example:
```prettyprint
# diagonal
is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]]
```
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t | |
=> Tensor v'1 t | input: Rank k tensor where k is 2, 4, or 6. |
-> Tensor Build t | diagonal: The extracted diagonal. |
Returns the diagonal part of the tensor.
This operation returns a tensor with the diagonal
part
of the input
. The diagonal
part is computed as follows:
Assume input
has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
tensor of rank k
with dimensions `[D1,..., Dk]` where:
`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
For example:
```prettyprint
# input
is [[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]]
tf.diag_part(input) ==> [1, 2, 3, 4] ```
Computes Psi, the derivative of Lgamma (the log of the absolute value of
`Gamma(x)`), element-wise.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, depth]`. |
-> Tensor v'2 t | filter: 3-D with shape `[filter_height, filter_width, depth]`. |
-> Tensor Build t | output: 4-D with shape `[batch, out_height, out_width, depth]`. |
Computes the grayscale dilation of 4-D input
and 3-D filter
tensors.
The input
tensor has shape `[batch, in_height, in_width, depth]` and the
filter
tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own structuring
function. The output
tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the padding
algorithm. We currently only support the default
NHWC data_format
.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with conv2d
, we use unmirrored filters):
output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros.
Note on duality: The dilation of input
by the filter
is equal to the
negation of the erosion of `-input` by the reflected filter
.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, depth]`. |
-> Tensor v'2 t | filter: 3-D with shape `[filter_height, filter_width, depth]`. |
-> Tensor Build t | output: 4-D with shape `[batch, out_height, out_width, depth]`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, depth]`. |
-> Tensor v'2 t | filter: 3-D with shape `[filter_height, filter_width, depth]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. |
-> Tensor Build t | filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`. |
Computes the gradient of morphological 2-D dilation with respect to the filter.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, depth]`. |
-> Tensor v'2 t | filter: 3-D with shape `[filter_height, filter_width, depth]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. |
-> Tensor Build t | filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, depth]`. |
-> Tensor v'2 t | filter: 3-D with shape `[filter_height, filter_width, depth]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. |
-> Tensor Build t | in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`. |
Computes the gradient of morphological 2-D dilation with respect to the input.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, depth]`. |
-> Tensor v'2 t | filter: 3-D with shape `[filter_height, filter_width, depth]`. |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. |
-> Tensor Build t | in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`. |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns x / y element-wise.
*NOTE*:
Div
supports broadcasting. More about broadcasting here
:: OneOf `[Word16, Float]` t | |
=> Tensor v'1 t | images: 4-D with shape `[batch, height, width, depth]`. A batch of images. |
-> Tensor v'2 Float | boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. |
-> Tensor Build t | output: 4-D with the same shape as |
Draw bounding boxes on a batch of images.
Outputs a copy of images
but draws on top of the pixels zero or more bounding
boxes specified by the locations in boxes
. The coordinates of the each
bounding box in boxes
are encoded as `[y_min, x_min, y_max, x_max]`. The
bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
height of the underlying image.
For example, if an image is 100 x 200 pixels and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the bounding box will be `(10, 40)` to `(50, 180)`.
Parts of the bounding box may fall outside the image.
:: OneOf `[Word16, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | images: 4-D with shape `[batch, height, width, depth]`. A batch of images. |
-> Tensor v'2 Float | boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. |
-> Tensor Build t | output: 4-D with the same shape as |
:: TensorType t | |
=> Int64 | num_partitions: The number of partitions to output. |
-> Tensor v'1 t | data |
-> Tensor v'2 Int32 | partitions: Any shape. Indices in the range `[0, num_partitions)`. |
-> [Tensor Build t] | outputs |
Partitions `data` into num_partitions
tensors using indices from partitions
.
For each index tuple js
of size `partitions.ndim`, the slice `data[js, ...]`
becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
are placed in `outputs[i]` in lexicographic order of js
, and the first
dimension of `outputs[i]` is the number of entries in partitions
equal to i
.
In detail,
```python outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) ```
`data.shape` must start with `partitions.shape`.
For example:
```python # Scalar partitions. partitions = 1 num_partitions = 2 data = [10, 20] outputs[0] = [] # Empty with shape [0, 2] outputs[1] = [[10, 20]]
# Vector partitions. partitions = [0, 0, 1, 1, 0] num_partitions = 2 data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] = [30, 40] ```
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/DynamicPartition.png" alt></div>
Interleave the values from the `data` tensors into a single tensor.
Builds a merged tensor such that
```python merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] ```
For example, if each `indices[m]` is scalar or vector, we have
```python # Scalar indices: merged[indices[m], ...] = data[m][...]
# Vector indices: merged[indices[m][i], ...] = data[m][i, ...] ```
Each `data[i].shape` must start with the corresponding `indices[i].shape`,
and the rest of `data[i].shape` must be constant w.r.t. i
. That is, we
must have `data[i].shape = indices[i].shape + constant`. In terms of this
constant
, the output shape is
merged.shape = [max(indices) + 1] + constant
Values are merged in order, so if an index appears in both `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the merged result.
For example:
```python indices[0] = 6 indices[1] = [4, 1] indices[2] = [[5, 2], [0, 3]] data[0] = [61, 62] data[1] = [[41, 42], [11, 12]] data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], [51, 52], [61, 62]] ```
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/DynamicStitch.png" alt></div>
:: TensorType t | |
=> Tensor v'1 Int64 | hypothesis_indices: The indices of the hypothesis list SparseTensor. This is an N x R int64 matrix. |
-> Tensor v'2 t | hypothesis_values: The values of the hypothesis list SparseTensor. This is an N-length vector. |
-> Tensor v'3 Int64 | hypothesis_shape: The shape of the hypothesis list SparseTensor. This is an R-length vector. |
-> Tensor v'4 Int64 | truth_indices: The indices of the truth list SparseTensor. This is an M x R int64 matrix. |
-> Tensor v'5 t | truth_values: The values of the truth list SparseTensor. This is an M-length vector. |
-> Tensor v'6 Int64 | truth_shape: truth indices, vector. |
-> Tensor Build Float | output: A dense float tensor with rank R - 1. For the example input: // hypothesis represents a 2x1 matrix with variable-length values: // (0,0) = ["a"] // (1,0) = ["b"] hypothesis_indices = [[0, 0, 0], [1, 0, 0]] hypothesis_values = ["a", "b"] hypothesis_shape = [2, 1, 1] // truth represents a 2x2 matrix with variable-length values: // (0,0) = [] // (0,1) = ["a"] // (1,0) = ["b", "c"] // (1,1) = ["a"] truth_indices = [[0, 1, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0]] truth_values = ["a", "b", "c", "a"] truth_shape = [2, 2, 2] normalize = true The output will be: // output is a 2x2 matrix with edit distances normalized by truth lengths. output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis |
Computes the (possibly normalized) Levenshtein Edit Distance.
The inputs are variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape) and (truth_indices, truth_values, truth_shape).
The inputs are:
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 Int64 | hypothesis_indices: The indices of the hypothesis list SparseTensor. This is an N x R int64 matrix. |
-> Tensor v'2 t | hypothesis_values: The values of the hypothesis list SparseTensor. This is an N-length vector. |
-> Tensor v'3 Int64 | hypothesis_shape: The shape of the hypothesis list SparseTensor. This is an R-length vector. |
-> Tensor v'4 Int64 | truth_indices: The indices of the truth list SparseTensor. This is an M x R int64 matrix. |
-> Tensor v'5 t | truth_values: The values of the truth list SparseTensor. This is an M-length vector. |
-> Tensor v'6 Int64 | truth_shape: truth indices, vector. |
-> Tensor Build Float | output: A dense float tensor with rank R - 1. For the example input: // hypothesis represents a 2x1 matrix with variable-length values: // (0,0) = ["a"] // (1,0) = ["b"] hypothesis_indices = [[0, 0, 0], [1, 0, 0]] hypothesis_values = ["a", "b"] hypothesis_shape = [2, 1, 1] // truth represents a 2x2 matrix with variable-length values: // (0,0) = [] // (0,1) = ["a"] // (1,0) = ["b", "c"] // (1,1) = ["a"] truth_indices = [[0, 1, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0]] truth_values = ["a", "b", "c", "a"] truth_shape = [2, 2, 2] normalize = true The output will be: // output is a 2x2 matrix with edit distances normalized by truth lengths. output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | features |
-> Tensor Build t | activations |
Computes exponential linear: `exp(features) - 1` if `features < 0`, `features` otherwise.
See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding Elu operation. |
-> Tensor v'2 t | outputs: The outputs of the corresponding Elu operation. |
-> Tensor Build t | backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, `gradients` otherwise. |
Computes gradients for the exponential linear (Elu) operation.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding Elu operation. |
-> Tensor v'2 t | outputs: The outputs of the corresponding Elu operation. |
-> Tensor Build t | backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, `gradients` otherwise. |
:: Tensor v'1 ByteString | input: Strings to be encoded. |
-> Tensor Build ByteString | output: Input strings encoded in base64. |
Encode strings into web-safe base64 format.
Refer to the following article for more information on base64 format: en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the end so that the encoded has length multiple of 4. See Padding section of the link above.
Web-safe means that the encoder uses - and _ instead of + and /.
:: OpParams | |
-> Tensor v'1 ByteString | input: Strings to be encoded. |
-> Tensor Build ByteString | output: Input strings encoded in base64. |
:: Tensor v'1 Word8 | image: 3-D with shape `[height, width, channels]`. |
-> Tensor Build ByteString | contents: 0-D. JPEG-encoded image. |
JPEG-encode an image.
image
is a 3-D uint8 Tensor of shape `[height, width, channels]`.
The attr format
can be used to override the color format of the encoded
output. Values can be:
- `''`: Use a default format based on the number of channels in the image.
- `grayscale`: Output a grayscale JPEG image. The channels
dimension of image
must be 1.
- `rgb`: Output an RGB JPEG image. The channels
dimension of image
must be 3.
If format
is not specified or is the empty string, a default format is picked
in function of the number of channels in image
:
- 1: Output a grayscale image.
- 3: Output an RGB image.
:: OneOf `[Word16, Word8]` t | |
=> Tensor v'1 t | image: 3-D with shape `[height, width, channels]`. |
-> Tensor Build ByteString | contents: 0-D. PNG-encoded image. |
PNG-encode an image.
image
is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
where channels
is:
- 1: for grayscale.
- 2: for grayscale + alpha.
- 3: for RGB.
- 4: for RGBA.
The ZLIB compression level, compression
, can be -1 for the PNG-encoder
default or a value from 0 to 9. 9 is the highest compression level, generating
the smallest output, but is slower.
:: TensorType t | |
=> Tensor v'1 t | data: The tensor to be made available to the child frame. |
-> Tensor Build t | output: The same tensor as `data`. |
Creates or finds a child frame, and makes `data` available to the child frame.
This op is used together with Exit
to create loops in the graph.
The unique frame_name
is used by the Executor
to identify frames. If
is_constant
is true, output
is a constant in the child frame; otherwise
it may be changed in the child frame. At most parallel_iterations
iterations
are run in parallel in the child frame.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | data: The tensor to be made available to the child frame. |
-> Tensor Build t | output: The same tensor as `data`. |
:: OneOf `[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build Bool | z |
Returns the truth value of (x == y) element-wise.
*NOTE*: `Equal` supports broadcasting. More about broadcasting here.
Computes the Gauss error function of x
element-wise.
Computes the complementary error function of x
element-wise.
:: TensorType t | |
=> Tensor v'1 t | data: The tensor to be made available to the parent frame. |
-> Tensor Build t | output: The same tensor as `data`. |
Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | data: The tensor to be made available to the parent frame. |
-> Tensor Build t | output: The same tensor as `data`. |
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes exponential of x element-wise. \(y = e^x\).
:: (TensorType t, OneOf `[Int32, Int64]` tdim) | |
=> Tensor v'1 t | input |
-> Tensor v'2 tdim | dim: 0-D (scalar). Specifies the dimension index at which to
expand the shape of |
-> Tensor Build t | output: Contains the same data as |
Inserts a dimension of 1 into a tensor's shape.
Given a tensor input
, this operation inserts a dimension of 1 at the
dimension index dim
of input
's shape. The dimension index dim
starts at
zero; if you specify a negative number for dim
it is counted backward from
the end.
This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`.
Other examples:
```prettyprint
# t
is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]
# t2
is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of size 1.
:: (TensorType t, OneOf `[Int32, Int64]` tdim) | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor v'2 tdim | dim: 0-D (scalar). Specifies the dimension index at which to
expand the shape of |
-> Tensor Build t | output: Contains the same data as |
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes exponential of x - 1 element-wise.
I.e., \(y = (exp x) - 1\).
:: Tensor v'1 Float | input: A 4-D float tensor of shape `[batch_size, height, width, channels]`. |
-> Tensor v'2 Int32 | size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. |
-> Tensor v'3 Float | offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing the x, y locations of the center of each window. |
-> Tensor Build Float | glimpse: A tensor representing the glimpses `[batch_size, glimpse_height, glimpse_width, channels]`. |
Extracts a glimpse from the input tensor.
Returns a set of windows called glimpses extracted at location
offsets
from the input tensor. If the windows only partially
overlaps the inputs, the non overlapping areas will be filled with
random noise.
The result is a 4-D tensor of shape `[batch_size, glimpse_height,
glimpse_width, channels]`. The channels and batch dimensions are the
same as that of the input tensor. The height and width of the output
windows are specified in the size
parameter.
The argument normalized
and centered
controls how the windows are built:
- If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension.
- If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0).
- If the coordinates are not normalized they are interpreted as numbers of pixels.
:: OpParams | |
-> Tensor v'1 Float | input: A 4-D float tensor of shape `[batch_size, height, width, channels]`. |
-> Tensor v'2 Int32 | size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. |
-> Tensor v'3 Float | offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing the x, y locations of the center of each window. |
-> Tensor Build Float | glimpse: A tensor representing the glimpses `[batch_size, glimpse_height, glimpse_width, channels]`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. |
-> Tensor Build t | patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image patches with size `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. |
Extract patches
from images
and put them in the "depth" output dimension.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. |
-> Tensor Build t | patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image patches with size `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. |
:: Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as |
Compute the 1-dimensional discrete Fourier Transform over the inner-most
dimension of input
.
:: Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as
|
Compute the 2-dimensional discrete Fourier Transform over the inner-most
2 dimensions of input
.
:: Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as
|
Compute the 3-dimensional discrete Fourier Transform over the inner-most 3
dimensions of input
.
:: MonadBuild m' | |
=> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
A queue that produces elements in first-in first-out order.
:: MonadBuild m' | |
=> OpParams | |
-> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
:: MonadBuild m' | |
=> [DataType] | component_types: The type of each component in a value. |
-> m' ResourceHandle | handle: The handle to the queue. |
A queue that produces elements in first-in first-out order.
:: MonadBuild m' | |
=> OpParams | |
-> [DataType] | component_types: The type of each component in a value. |
-> m' ResourceHandle | handle: The handle to the queue. |
:: OpParams | |
-> Tensor Build ByteString | fact |
Fake-quantize the inputs
tensor, type float to outputs
tensor of same type.
Attributes [min; max] define the clamping range for the inputs
data. Op
divides this range into 255 steps (total of 256 values), then replaces each
inputs
value with the closest of the quantized step values.
Quantization is called fake since the output is still in floating point.
fakeQuantWithMinMaxArgsGradient
:: Tensor v'1 Float | gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. |
-> Tensor v'2 Float | inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation. |
-> Tensor Build Float | backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: `gradients * (inputs >= min && inputs <= max)`. |
Compute gradients for a FakeQuantWithMinMaxArgs operation.
fakeQuantWithMinMaxArgsGradient'
:: OpParams | |
-> Tensor v'1 Float | gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. |
-> Tensor v'2 Float | inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation. |
-> Tensor Build Float | backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: `gradients * (inputs >= min && inputs <= max)`. |
Fake-quantize the inputs
tensor of type float and shape `[b, h, w, d]` via
global float scalars min
and max
to outputs
tensor of same shape as
inputs
.
- [min; max] is the clamping range for the `inputs` data. Op divides this range into 255 steps (total of 256 values), then replaces each `inputs` value with the closest of the quantized step values.
This operation has a gradient and thus allows for training min
and max
values.
fakeQuantWithMinMaxVarsGradient
:: Tensor v'1 Float | gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. |
-> Tensor v'2 Float | inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. min, max: Quantization interval, scalar floats. |
-> Tensor v'3 Float | min |
-> Tensor v'4 Float | max |
-> (Tensor Build Float, Tensor Build Float, Tensor Build Float) | (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)
|
Compute gradients for a FakeQuantWithMinMaxVars operation.
fakeQuantWithMinMaxVarsGradient'
:: OpParams | |
-> Tensor v'1 Float | gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. |
-> Tensor v'2 Float | inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. min, max: Quantization interval, scalar floats. |
-> Tensor v'3 Float | min |
-> Tensor v'4 Float | max |
-> (Tensor Build Float, Tensor Build Float, Tensor Build Float) | (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)
|
fakeQuantWithMinMaxVarsPerChannel
Fake-quantize the inputs
tensor of type float and one of the shapes: `[d]`,
`[b, d]` `[b, h, w, d]` via per-channel floats min
and max
of shape `[d]`
to outputs
tensor of same shape as inputs
.
- [min; max] is the clamping range for the `inputs` data in the corresponding depth channel. Op divides this range into 255 steps (total of 256 values), then replaces each `inputs` value with the closest of the quantized step values.
This operation has a gradient and thus allows for training min
and max
values.
fakeQuantWithMinMaxVarsPerChannelGradient
:: Tensor v'1 Float | gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. |
-> Tensor v'2 Float | inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
same as |
-> Tensor v'3 Float | min |
-> Tensor v'4 Float | max |
-> (Tensor Build Float, Tensor Build Float, Tensor Build Float) | (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)
|
Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
fakeQuantWithMinMaxVarsPerChannelGradient'
:: OpParams | |
-> Tensor v'1 Float | gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. |
-> Tensor v'2 Float | inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
same as |
-> Tensor v'3 Float | min |
-> Tensor v'4 Float | max |
-> (Tensor Build Float, Tensor Build Float, Tensor Build Float) | (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)
|
:: MonadBuild m' | |
=> ResourceHandle | resource |
-> m' (Tensor Ref ByteString) | handle |
Deprecated. Do not use.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | resource |
-> m' (Tensor Ref ByteString) | handle |
:: TensorType t | |
=> Tensor v'1 Int32 | dims: 1-D. Represents the shape of the output tensor. |
-> Tensor v'2 t | value: 0-D (scalar). Value to fill the returned tensor.
|
-> Tensor Build t | output |
Creates a tensor filled with a scalar value.
This operation creates a tensor of shape dims
and fills it with value
.
For example:
```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) ==> [[9, 9, 9] [9, 9, 9]] ```
:: MonadBuild m' | |
=> Int64 | record_bytes |
-> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
A Reader that outputs fixed-length records from a file.
:: MonadBuild m' | |
=> OpParams | |
-> Int64 | record_bytes |
-> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
:: MonadBuild m' | |
=> Int64 | record_bytes |
-> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
A Reader that outputs fixed-length records from a file.
:: MonadBuild m' | |
=> OpParams | |
-> Int64 | record_bytes |
-> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
:: Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Generates labels for candidate sampling with a learned unigram distribution.
A unigram sampler could use a fixed unigram distribution read from a file or passed in as an in-memory array instead of building up the distribution from data on the fly. There is also an option to skew the distribution by applying a distortion power to the weights.
The vocabulary file should be in CSV-like format, with the last field being the weight associated with the word.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
:: OpParams | |
-> Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Returns element-wise largest integer not greater than x.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns x // y element-wise.
*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting here.
Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
true, this follows Python semantics in that the result here is consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
*NOTE*: `FloorMod` supports broadcasting. More about broadcasting here.
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> Tensor v'1 t | value: 4-D with shape `[batch, height, width, channels]`. |
-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) | (output, row_pooling_sequence, col_pooling_sequence)
|
Performs fractional average pooling on the input.
Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region.
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | value: 4-D with shape `[batch, height, width, channels]`. |
-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) | (output, row_pooling_sequence, col_pooling_sequence)
|
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> Tensor v'1 Int64 | orig_input_tensor_shape: Original input tensor shape for |
-> Tensor v'2 t | out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of |
-> Tensor v'3 Int64 | row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence. |
-> Tensor v'4 Int64 | col_pooling_sequence: column pooling sequence, form pooling region with row_pooling sequence. |
-> Tensor Build t | output: 4-D. Gradients w.r.t. the input of |
Computes gradient of the FractionalAvgPool function.
Unlike FractionalMaxPoolGrad, we don't need to find arg_max for FractionalAvgPoolGrad, we just need to evenly back-propagate each element of out_backprop to those indices that form the same pooling cell. Therefore, we just need to know the shape of original input tensor, instead of the whole tensor.
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | orig_input_tensor_shape: Original input tensor shape for |
-> Tensor v'2 t | out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of |
-> Tensor v'3 Int64 | row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence. |
-> Tensor v'4 Int64 | col_pooling_sequence: column pooling sequence, form pooling region with row_pooling sequence. |
-> Tensor Build t | output: 4-D. Gradients w.r.t. the input of |
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> Tensor v'1 t | value: 4-D with shape `[batch, height, width, channels]`. |
-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) | (output, row_pooling_sequence, col_pooling_sequence)
|
Performs fractional max pooling on the input.
Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries.
First we define the following:
- input_row_length : the number of rows from the input set
- output_row_length : which will be smaller than the input
- alpha = input_row_length / output_row_length : our reduction ratio
- K = floor(alpha)
- row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
- a[0] = 0 : the first value of the sequence is 0
- a[end] = input_row_length : the last value of the sequence is the size
- K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
- length(row_pooling_sequence) = output_row_length+1
For more details on fractional max pooling, see this paper: Benjamin Graham, Fractional Max-Pooling
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | value: 4-D with shape `[batch, height, width, channels]`. |
-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) | (output, row_pooling_sequence, col_pooling_sequence)
|
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> Tensor v'1 t | orig_input: Original input for |
-> Tensor v'2 t | orig_output: Original output for |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of |
-> Tensor v'4 Int64 | row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence. |
-> Tensor v'5 Int64 | col_pooling_sequence: column pooling sequence, form pooling region with row_pooling sequence. |
-> Tensor Build t | output: 4-D. Gradients w.r.t. the input of |
Computes gradient of the FractionalMaxPool function.
:: OneOf `[Int32, Int64, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | orig_input: Original input for |
-> Tensor v'2 t | orig_output: Original output for |
-> Tensor v'3 t | out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of |
-> Tensor v'4 Int64 | row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence. |
-> Tensor v'5 Int64 | col_pooling_sequence: column pooling sequence, form pooling region with row_pooling sequence. |
-> Tensor Build t | output: 4-D. Gradients w.r.t. the input of |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x: A 4D Tensor for input data. |
-> Tensor v'2 t | scale: A 1D Tensor for scaling factor, to scale the normalized x. |
-> Tensor v'3 t | offset: A 1D Tensor for offset, to shift to the normalized x. |
-> Tensor v'4 t | mean: A 1D Tensor for population mean. Used for inference only; must be empty for training. |
-> Tensor v'5 t | variance: A 1D Tensor for population variance. Used for inference only; must be empty for training. |
-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) | (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)
|
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | x: A 4D Tensor for input data. |
-> Tensor v'2 t | scale: A 1D Tensor for scaling factor, to scale the normalized x. |
-> Tensor v'3 t | offset: A 1D Tensor for offset, to shift to the normalized x. |
-> Tensor v'4 t | mean: A 1D Tensor for population mean. Used for inference only; must be empty for training. |
-> Tensor v'5 t | variance: A 1D Tensor for population variance. Used for inference only; must be empty for training. |
-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) | (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)
|
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | y_backprop: A 4D Tensor for the gradient with respect to y. |
-> Tensor v'2 t | x: A 4D Tensor for input data. |
-> Tensor v'3 t | scale: A 1D Tensor for scaling factor, to scale the normalized x. |
-> Tensor v'4 t | reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation. |
-> Tensor v'5 t | reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation. |
-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) | (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)
|
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | y_backprop: A 4D Tensor for the gradient with respect to y. |
-> Tensor v'2 t | x: A 4D Tensor for input data. |
-> Tensor v'3 t | scale: A 1D Tensor for scaling factor, to scale the normalized x. |
-> Tensor v'4 t | reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation. |
-> Tensor v'5 t | reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation. |
-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) | (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)
|
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of |
-> Tensor v'3 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. |
-> Tensor Build t | output |
Performs a padding as a preprocess during a convolution.
Similar to FusedResizeAndPadConv2d, this op allows for an optimized
implementation where the spatial padding transformation stage is fused with the
im2col lookup, but in this case without the bilinear filtering required for
resizing. Fusing the padding prevents the need to write out the intermediate
results as whole tensors, reducing memory pressure, and we can get some latency
gains by merging the transformation calculations.
The data_format attribute for Conv2D isn't supported by this op, and NHWC
order is used instead.
Internally this op uses a single per-graph scratch buffer, which means that it
will block if multiple versions are being run in parallel. This is because this
operator is primarily an optimization to minimize memory usage.
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of |
-> Tensor v'3 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. |
-> Tensor Build t | output |
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor v'3 Int32 | paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of |
-> Tensor v'4 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. |
-> Tensor Build t | output |
Performs a resize and padding as a preprocess during a convolution.
It's often possible to do spatial transformations more efficiently as part of
the packing stage of a convolution, so this op allows for an optimized
implementation where these stages are fused together. This prevents the need to
write out the intermediate results as whole tensors, reducing memory pressure,
and we can get some latency gains by merging the transformation calculations.
The data_format attribute for Conv2D isn't supported by this op, and defaults to
NHWC
order.
Internally this op uses a single per-graph scratch buffer, which means that it
will block if multiple versions are being run in parallel. This is because this
operator is primarily an optimization to minimize memory usage.
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, in_height, in_width, in_channels]`. |
-> Tensor v'2 Int32 | size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor v'3 Int32 | paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of |
-> Tensor v'4 t | filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. |
-> Tensor Build t | output |
:: (TensorType tparams, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 tparams | params |
-> Tensor v'2 tindices | indices |
-> Tensor Build tparams | output |
Gather slices from params
according to indices
.
indices
must be an integer tensor of any dimension (usually 0-D or 1-D).
Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
```python # Scalar indices output[:, ..., :] = params[indices, :, ... :]
# Vector indices output[i, :, ..., :] = params[indices[i], :, ... :]
# Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] ```
If indices
is a permutation and `len(indices) == params.shape[0]` then
this operation will permute params
accordingly.
(Figure: images/Gather.png — illustration of the Gather operation.)
:: (TensorType tparams, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 tparams | params: `P-D`. The tensor from which to gather values. |
-> Tensor v'2 tindices | indices: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`. |
-> Tensor Build tparams | output: `(P+Q-K-1)-D`. Values from |
Gather values or slices from params
according to indices
.
params
is a Tensor of rank P
and indices
is a Tensor of rank Q
.
indices
must be integer tensor, containing indices into params
.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of indices
(with length K
) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the K
th
dimension of params
.
Produces an output tensor with shape
``` [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. ```
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [[a
, b
], [c
, d
]]
output = [a
, d
]
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [[a
, b
], [c
, d
]]
output = [[c
, d
], [a
, b
]]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[[a0
, b0
], [c0
, d0
]],
[[a1
, b1
], [c1
, d1
]]]
output = [[[a1
, b1
], [c1
, d1
]]]
indices = [[0, 1], [1, 0]]
params = [[[a0
, b0
], [c0
, d0
]],
[[a1
, b1
], [c1
, d1
]]]
output = [[c0
, d0
], [a1
, b1
]]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[[a0
, b0
], [c0
, d0
]],
[[a1
, b1
], [c1
, d1
]]]
output = [b0
, b1
]
```
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [[a
, b
], [c
, d
]]
output = [[a
], [b
]]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [[a
, b
], [c
, d
]]
output = [[[c
, d
]], [[a
, b
]]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[[a0
, b0
], [c0
, d0
]],
[[a1
, b1
], [c1
, d1
]]]
output = [[[[a1
, b1
], [c1
, d1
]]],
[[[a0
, b0
], [c0
, d0
]]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[[a0
, b0
], [c0
, d0
]],
[[a1
, b1
], [c1
, d1
]]]
output = [[[c0
, d0
], [a1
, b1
]],
[[a0
, b0
], [c1
, d1
]]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[[a0
, b0
], [c0
, d0
]],
[[a1
, b1
], [c1
, d1
]]]
output = [[b0
, b1
], [d0
, c1
]]
```
:: (TensorType tparams, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 tparams | params: `P-D`. The tensor from which to gather values. |
-> Tensor v'2 tindices | indices: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`. |
-> Tensor Build tparams | output: `(P+Q-K-1)-D`. Values from |
:: TensorType t | |
=> Tensor v'1 t | value: The tensor to be stored. |
-> Tensor Build ByteString | handle: The handle for the tensor stored in the session state. |
Store the input tensor in the state of the current session.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | value: The tensor to be stored. |
-> Tensor Build ByteString | handle: The handle for the tensor stored in the session state. |
:: TensorType dtype | |
=> Tensor v'1 ByteString | handle: The handle for a tensor stored in the session state. |
-> Tensor Build dtype | value: The tensor for the given handle. |
Get the value of the tensor specified by its handle.
:: TensorType dtype | |
=> OpParams | |
-> Tensor v'1 ByteString | handle: The handle for a tensor stored in the session state. |
-> Tensor Build dtype | value: The tensor for the given handle. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build Bool | z |
Returns the truth value of (x > y) element-wise.
- NOTE*:
Greater
supports broadcasting. More about broadcasting here
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build Bool | z |
Returns the truth value of (x >= y) element-wise.
- NOTE*:
GreaterEqual
supports broadcasting. More about broadcasting here
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3. |
-> Tensor Build t | output: |
Convert one or more images from HSV to RGB.
Outputs a tensor of the same shape as the images
tensor, containing the RGB
value of the pixels. The output is only well defined if the value in images
are in `[0,1]`.
See rgb_to_hsv
for a description of the HSV encoding.
:: MonadBuild m' | |
=> DataType | key_dtype: Type of the table keys. |
-> DataType | value_dtype: Type of the table values. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
Creates a non-initialized hash table.
This op creates a hash table, specifying the type of its keys and values. Before using the table you will have to initialize it. After initialization the table will be immutable.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | key_dtype: Type of the table keys. |
-> DataType | value_dtype: Type of the table values. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 ByteString | tag: Scalar. Tag to use for the |
-> Tensor v'2 t | values: Any shape. Values to use to build the histogram. |
-> Tensor Build ByteString | summary: Scalar. Serialized |
Outputs a Summary
protocol buffer with a histogram.
The generated
`Summary`
has one summary value containing a histogram for values
.
This op reports an InvalidArgument
error if any value is not finite.
:: Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as |
Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most
dimension of input
.
:: Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as
|
Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most
2 dimensions of input
.
:: OpParams | |
-> Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as
|
:: Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as
|
Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most
3 dimensions of input
.
:: OpParams | |
-> Tensor v'1 (Complex Float) | input: A complex64 tensor. |
-> Tensor Build (Complex Float) | output: A complex64 tensor of the same shape as
|
:: TensorType t | |
=> Tensor v'1 t | input |
-> Tensor Build t | output |
Return a tensor with the same shape and contents as the input tensor or value.
:: MonadBuild m' | |
=> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. ReaderRead will take the front work string and output (work, work).
:: MonadBuild m' | |
=> OpParams | |
-> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
:: MonadBuild m' | |
=> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. ReaderRead will take the front work string and output (work, work).
:: MonadBuild m' | |
=> OpParams | |
-> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
Compute the lower regularized incomplete Gamma function `P(a, x)`.
The lower regularized incomplete Gamma function is defined as:
``` P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) ``` where ``` gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt ``` is the lower incomplete Gamma function.
Note, above `Q(a, x)` (Igammac
) is the upper regularized complete
Gamma function.
Compute the upper regularized incomplete Gamma function `Q(a, x)`.
The upper regularized incomplete Gamma function is defined as:
``` Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) ``` where ``` Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt ``` is the upper incomplete Gamma function.
Note, above `P(a, x)` (Igamma
) is the lower regularized complete
Gamma function.
:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) | |
=> Tensor v'1 t | input |
-> Tensor Build tout | output |
Returns the imaginary part of a complex number.
Given a tensor input
of complex numbers, this operation returns a tensor of
type float
that is the imaginary part of each element in input
. All
elements in input
must be complex numbers of the form \(a + bj\), where *a*
is the real part and *b* is the imaginary part returned by this operation.
For example:
```
# tensor input
is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```
:: OneOf `[Word16, Word8, Float]` t | |
=> Tensor v'1 ByteString | tag: Scalar. Used to build the |
-> Tensor v'2 t | tensor: 4-D of shape `[batch_size, height, width, channels]` where
|
-> Tensor Build ByteString | summary: Scalar. Serialized |
Outputs a Summary
protocol buffer with images.
The summary has up to max_images
summary values containing images. The
images are built from tensor
which must be 4-D with shape `[batch_size,
height, width, channels]` and where channels
can be:
- 1:
tensor
is interpreted as Grayscale. - 3:
tensor
is interpreted as RGB. - 4:
tensor
is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. uint8
values are unchanged. The op uses two different
normalization algorithms:
- If the input values are all positive, they are rescaled so the largest one is 255.
- If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255.
The tag
argument is a scalar Tensor
of type string
. It is used to
build the tag
of the summary values:
- If
max_images
is 1, the summary value tag is '*tag*/image'. - If
max_images
is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
The bad_color
argument is the color to use in the generated images for
non-finite input values. It is a uint8
1-D tensor of length channels
.
Each element must be in the range `[0, 255]` (It represents the value of a
pixel in the output image). Non-finite values in the input tensor are
replaced by this tensor in the output image. The default value is the color
red.
:: OneOf `[Word16, Word8, Float]` t | |
=> OpParams | |
-> Tensor v'1 ByteString | tag: Scalar. Used to build the |
-> Tensor v'2 t | tensor: 4-D of shape `[batch_size, height, width, channels]` where
|
-> Tensor Build ByteString | summary: Scalar. Serialized |
:: TensorType dtype | |
=> Shape | shape: Shape of the returned tensor. |
-> Tensor Build dtype | tensor |
Returns immutable tensor from memory region.
The current implementation memmaps the tensor from a file.
:: TensorType dtype | |
=> OpParams | |
-> Shape | shape: Shape of the returned tensor. |
-> Tensor Build dtype | tensor |
:: OneOf `[Int32, Int64]` t | |
=> Int64 | k: Number of top elements to look at for computing precision. |
-> Tensor v'1 Float | predictions: A |
-> Tensor v'2 t | targets: A |
-> Tensor Build Bool | precision: Computed Precision at |
Says whether the targets are in the top K
predictions.
This outputs a batch_size
bool array, an entry `out[i]` is true
if the
prediction for the target class is among the top k
predictions among
all predictions for example i
. Note that the behavior of InTopK
differs
from the TopK
op in its handling of ties; if multiple classes have the
same prediction value and straddle the top-k
boundary, all of those
classes are considered to be in the top k
.
More formally, let
\(predictions_i\) be the predictions for all classes for example i
,
\(targets_i\) be the target class for example i
,
\(out_i\) be the output for example i
,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
:: OneOf `[Int32, Int64]` t | |
=> OpParams | |
-> Int64 | k: Number of top elements to look at for computing precision. |
-> Tensor v'1 Float | predictions: A |
-> Tensor v'2 t | targets: A |
-> Tensor Build Bool | precision: Computed Precision at |
:: (MonadBuild m', TensorType tkey, TensorType tval) | |
=> Tensor Ref ByteString | table_handle: Handle to a table which will be initialized. |
-> Tensor v'2 tkey | keys: Keys of type Tkey. |
-> Tensor v'3 tval | values: Values of type Tval. |
-> m' ControlNode |
Table initializer that takes two tensors for keys and values respectively.
:: (MonadBuild m', TensorType tkey, TensorType tval) | |
=> OpParams | |
-> Tensor Ref ByteString | table_handle: Handle to a table which will be initialized. |
-> Tensor v'2 tkey | keys: Keys of type Tkey. |
-> Tensor v'3 tval | values: Values of type Tval. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Int64 | key_index: Column index in a line to get the table |
-> Int64 | value_index: Column index that represents information of a line to get the table
|
-> Tensor Ref ByteString | table_handle: Handle to a table which will be initialized. |
-> Tensor v'2 ByteString | filename: Filename of a vocabulary text file. |
-> m' ControlNode |
Initializes a table from a text file.
It inserts one key-value pair into the table for each line of the file.
The key and value is extracted from the whole line content, elements from the
split line based on delimiter
or the line number (starting from zero).
Where to extract the key and value from a line is specified by key_index
and
value_index
.
- A value of -1 means use the line number (starting from zero), expects
int64
. - A value of -2 means use the whole line content, expects
string
. - A value >= 0 means use the index (starting at zero) of the split line based
on
delimiter
.
:: MonadBuild m' | |
=> OpParams | |
-> Int64 | key_index: Column index in a line to get the table |
-> Int64 | value_index: Column index that represents information of a line to get the table
|
-> Tensor Ref ByteString | table_handle: Handle to a table which will be initialized. |
-> Tensor v'2 ByteString | filename: Filename of a vocabulary text file. |
-> m' ControlNode |
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes the reciprocal of x element-wise.
I.e., \(y = 1 / x\).
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Computes the gradient for the inverse of x
wrt its input.
Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy
is the corresponding input gradient.
Computes the inverse permutation of a tensor.
This operation computes the inverse of an index permutation. It takes a 1-D
integer tensor x
, which represents the indices of a zero-based array, and
swaps each value with its index position. In other words, for an output tensor
y
and an input tensor x
, this operation computes the following:
`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
The values must include 0. There can be no duplicate values or negative values.
For example:
```prettyprint
# tensor x
is [3, 4, 0, 2, 1]
invert_permutation(x) ==> [2, 4, 3, 0, 1]
```
Returns which elements of x are finite.
compatibility(numpy)
Equivalent to np.isfinite
end_compatibility
Returns which elements of x are Inf.
compatibility(numpy)
Equivalent to np.isinf
end_compatibility
Returns which elements of x are NaN.
compatibility(numpy)
Equivalent to np.isnan
end_compatibility
:: (MonadBuild m', TensorType dtype) | |
=> Tensor Ref dtype | ref: Should be from a |
-> m' (Tensor Value Bool) | is_initialized |
Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Tensor Ref dtype | ref: Should be from a |
-> m' (Tensor Value Bool) | is_initialized |
Local Response Normalization.
The 4-D input
tensor is treated as a 3-D array of 1-D vectors (along the last
dimension), and each vector is normalized independently. Within a given vector,
each component is divided by the weighted, squared sum of inputs within
depth_radius
. In detail,
sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** beta
For details, see Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012).
:: OneOf `[Word16, Float]` t | |
=> Tensor v'1 t | input_grads: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 t | input_image: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'3 t | output_image: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor Build t | output: The gradients for LRN. |
Gradients for Local Response Normalization.
:: OneOf `[Word16, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input_grads: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 t | input_image: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'3 t | output_image: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor Build t | output: The gradients for LRN. |
learnedUnigramCandidateSampler
:: Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Generates labels for candidate sampling with a learned unigram distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
learnedUnigramCandidateSampler'
:: OpParams | |
-> Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build Bool | z |
Returns the truth value of (x < y) element-wise.
- NOTE*:
Less
supports broadcasting. More about broadcasting here
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build Bool | z |
Returns the truth value of (x <= y) element-wise.
- NOTE*:
LessEqual
supports broadcasting. More about broadcasting here
Computes the log of the absolute value of `Gamma(x)` element-wise.
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | start: First entry in the range. |
-> Tensor v'2 t | stop: Last entry in the range. |
-> Tensor v'3 tidx | num: Number of values to generate. |
-> Tensor Build t | output: 1-D. The generated values. |
Generates values in an interval.
A sequence of num
evenly-spaced values are generated beginning at start
.
If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
so that the last one is exactly stop
.
For example:
``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] ```
:: (TensorType t, OneOf `[Int32, Int64]` out_idx) | |
=> Tensor v'1 t | x: 1-D. Values to keep. |
-> Tensor v'2 t | y: 1-D. Values to remove. |
-> (Tensor Build t, Tensor Build out_idx) | (out, idx)
|
Computes the difference between two lists of numbers or strings.
Given a list x
and a list y
, this operation returns a list out
that
represents all values that are in x
but not in y
. The returned list out
is sorted in the same order that the numbers appear in x
(duplicates are
preserved). This operation also returns a list idx
that represents the
position of each out
element in x
. In other words:
`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
For example, given this input:
```prettyprint x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ```
This operation would return:
```prettyprint out ==> [2, 4, 6] idx ==> [1, 3, 5] ```
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes natural logarithm of x element-wise.
I.e., \(y = log_e x\).
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes natural logarithm of (1 + x) element-wise.
I.e., \(y = log_e (1 + x)\).
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | logits: 2-D with shape `[batch_size, num_classes]`. |
-> Tensor Build t | logsoftmax: Same shape as |
Computes log softmax activations.
For each batch i
and class j
we have
logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
:: Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Generates labels for candidate sampling with a log-uniform distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
:: OpParams | |
-> Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Returns the truth value of x AND y element-wise.
- NOTE*:
LogicalAnd
supports broadcasting. More about broadcasting here
Returns the truth value of x OR y element-wise.
- NOTE*:
LogicalOr
supports broadcasting. More about broadcasting here
:: (MonadBuild m', TensorType tkeys, TensorType tvalues) | |
=> Tensor Ref ByteString | table_handle: Handle to the table. |
-> m' (Tensor Value tkeys, Tensor Value tvalues) | (keys, values)
|
Outputs all keys and values in the table.
:: (MonadBuild m', TensorType tkeys, TensorType tvalues) | |
=> OpParams | |
-> Tensor Ref ByteString | table_handle: Handle to the table. |
-> m' (Tensor Value tkeys, Tensor Value tvalues) | (keys, values)
|
:: (MonadBuild m', TensorType tin, TensorType tout) | |
=> Tensor Ref ByteString | table_handle: Handle to the table. |
-> Tensor v'2 tin | keys: Any shape. Keys to look up. |
-> Tensor v'3 tout | default_value |
-> m' (Tensor Value tout) | values: Same shape as |
Looks up keys in a table, outputs the corresponding values.
The tensor keys
must be of the same type as the keys of the table.
The output values
is of the type of the table values.
The scalar default_value
is the value output for keys not present in the
table. It must also be of the same type as the table values.
:: (MonadBuild m', TensorType tin, TensorType tout) | |
=> OpParams | |
-> Tensor Ref ByteString | table_handle: Handle to the table. |
-> Tensor v'2 tin | keys: Any shape. Keys to look up. |
-> Tensor v'3 tout | default_value |
-> m' (Tensor Value tout) | values: Same shape as |
:: (MonadBuild m', TensorType tin, TensorType tout) | |
=> Tensor Ref ByteString | table_handle: Handle to the table. |
-> Tensor v'2 tin | keys: Any shape. Keys to look up. |
-> Tensor v'3 tout | values: Values to associate with keys. |
-> m' ControlNode |
Replaces the contents of the table with the specified keys and values.
The tensor keys
must be of the same type as the keys of the table.
The tensor values
must be of the type of the table values.
:: (MonadBuild m', TensorType tin, TensorType tout) | |
=> OpParams | |
-> Tensor Ref ByteString | table_handle: Handle to the table. |
-> Tensor v'2 tin | keys: Any shape. Keys to look up. |
-> Tensor v'3 tout | values: Values to associate with keys. |
-> m' ControlNode |
:: (MonadBuild m', TensorType tin, TensorType tout) | |
=> Tensor Ref ByteString | table_handle: Handle to the table. |
-> Tensor v'2 tin | keys: Any shape. Keys to look up. |
-> Tensor v'3 tout | values: Values to associate with keys. |
-> m' ControlNode |
Updates the table to associate keys with values.
The tensor keys
must be of the same type as the keys of the table.
The tensor values
must be of the type of the table values.
:: (MonadBuild m', TensorType tin, TensorType tout) | |
=> OpParams | |
-> Tensor Ref ByteString | table_handle: Handle to the table. |
-> Tensor v'2 tin | keys: Any shape. Keys to look up. |
-> Tensor v'3 tout | values: Values to associate with keys. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor Ref ByteString | table_handle: Handle to the table. |
-> m' (Tensor Value Int64) | size: Scalar that contains number of elements in the table. |
Computes the number of elements in the given table.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | table_handle: Handle to the table. |
-> m' (Tensor Value Int64) | size: Scalar that contains number of elements in the table. |
:: Tensor v'1 Bool | input: A boolean scalar, representing the branch predicate of the Switch op. |
-> Tensor Build Bool | output: The same tensor as |
Forwards the input to the output.
This operator represents the loop termination condition used by the "pivot" switches of a loop.
:: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t | |
=> Tensor v'1 t | a |
-> Tensor v'2 t | b |
-> Tensor Build t | product |
Multiply the matrix "a" by the matrix "b".
The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transpose_b is true).
- Note*: The default kernel implementation for MatMul on GPUs uses cublas.
:: Tensor v'1 ByteString | pattern: A (scalar) shell wildcard pattern. |
-> Tensor Build ByteString | filenames: A vector of matching filenames. |
Returns the set of files matching a pattern.
Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion.
:: OpParams | |
-> Tensor v'1 ByteString | pattern: A (scalar) shell wildcard pattern. |
-> Tensor Build ByteString | filenames: A vector of matching filenames. |
:: TensorType t | |
=> Tensor v'1 t | input: Rank |
-> Tensor v'2 Int64 | num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle. |
-> Tensor v'3 Int64 | num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle. |
-> Tensor Build t | band: Rank |
Copy a tensor setting everything outside a central band in each innermost matrix
to zero.
The band
part is computed as follows:
Assume input
has k
dimensions `[I, J, K, ..., M, N]`, then the output is a
tensor with the same shape where
`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
The indicator function
`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper)`.
For example:
```prettyprint
# if input
is [[ 0, 1, 2, 3]
[-1, 0, 1, 2]
[-2, -1, 0, 1]
[-3, -2, -1, 0]],
tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] [-1, 0, 1, 2] [ 0, -1, 0, 1] [ 0, 0, -1, 0]],
tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] [-1, 0, 1, 0] [-2, -1, 0, 1] [ 0, -2, -1, 0]] ```
Useful special cases:
```prettyprint tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. tf.matrix_band_part(input, 0, 0) ==> Diagonal. ```
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input: Rank |
-> Tensor v'2 Int64 | num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle. |
-> Tensor v'3 Int64 | num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle. |
-> Tensor Build t | band: Rank |
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | input: Shape is `[..., M, M]`. |
-> Tensor Build t | output: Shape is `[...]`. |
Computes the determinant of one or more square matrices.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor containing the determinants for all input submatrices `[..., :, :]`.
:: TensorType t | |
=> Tensor v'1 t | diagonal: Rank |
-> Tensor Build t | output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`. |
Returns a batched diagonal tensor with a given batched diagonal values.
Given a diagonal
, this operation returns a tensor with the diagonal
and
everything else padded with zeros. The diagonal is computed as follows:
Assume diagonal
has k
dimensions `[I, J, K, ..., N]`, then the output is a
tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
For example:
```prettyprint
# diagonal
is [[1, 2, 3, 4], [5, 6, 7, 8]]
and diagonal.shape = (2, 4)
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, 8]]]
which has shape (2, 4, 4) ```
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | diagonal: Rank |
-> Tensor Build t | output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`. |
:: TensorType t | |
=> Tensor v'1 t | input: Rank |
-> Tensor Build t | diagonal: The extracted diagonal(s) having shape `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`. |
Returns the batched diagonal part of a batched tensor.
This operation returns a tensor with the diagonal
part
of the batched input
. The diagonal
part is computed as follows:
Assume input
has k
dimensions `[I, J, K, ..., M, N]`, then the output is a
tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
The input must be at least a matrix.
For example:
```prettyprint
# input
is [[[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]],
[[5, 0, 0, 0]
[0, 6, 0, 0]
[0, 0, 7, 0]
[0, 0, 0, 8]]]
and input.shape = (2, 4, 4)
tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
which has shape (2, 4) ```
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input: Rank |
-> Tensor Build t | diagonal: The extracted diagonal(s) having shape `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`. |
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | input: Shape is `[..., M, M]`. |
-> Tensor Build t | output: Shape is `[..., M, M]`.
|
Computes the inverse of one or more square invertible matrices or their
adjoints (conjugate transposes).
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the inverse for all input submatrices `[..., :, :]`.
The op uses LU decomposition with partial pivoting to compute the inverses.
If a matrix is not invertible there is no guarantee what the op does. It may detect the condition and raise an exception or it may simply return a garbage result.
:: TensorType t | |
=> Tensor v'1 t | input: Rank `k+1`, where `k >= 1`. |
-> Tensor v'2 t | diagonal: Rank |
-> Tensor Build t | output: Rank `k+1`, with `output.shape = input.shape`. |
Returns a batched matrix tensor with new batched diagonal values.
Given input
and diagonal
, this operation returns a tensor with the
same shape and values as input
, except for the main diagonal of the
innermost matrices. These will be overwritten by the values in diagonal
.
The output is computed as follows:
Assume input
has `k+1` dimensions `[I, J, K, ..., M, N]` and diagonal
has
k
dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
- `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
- `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
:: OneOf `[Complex Double, Complex Float, Double, Float]` t | |
=> Tensor v'1 t | matrix: Shape is `[..., M, M]`. |
-> Tensor v'2 t | rhs: Shape is `[..., M, K]`. |
-> Tensor Build t | output: Shape is `[..., M, K]`. |
Solves systems of linear equations.
Matrix
is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. Rhs
is a tensor of shape `[..., M, K]`. The output
is
a tensor of shape `[..., M, K]`. If adjoint
is False
then each output matrix
satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
If adjoint
is True
then each output matrix satisfies
`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | matrix: Shape is `[..., M, N]`. |
-> Tensor v'2 t | rhs: Shape is `[..., M, K]`. |
-> Tensor v'3 Double | l2_regularizer: Scalar tensor.
|
-> Tensor Build t | output: Shape is `[..., N, K]`. |
Solves one or more linear least-squares problems.
matrix
is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`.
The output is a tensor of shape `[..., N, K]` where each output matrix solves
each of the equations matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]
in the least squares sense.
Below we will use the following notation for each pair of
matrix and right-hand sides in the batch:
matrix
=\(A \in \Re^{m \times n}\),
rhs
=\(B \in \Re^{m \times k}\),
output
=\(X \in \Re^{n \times k}\),
l2_regularizer
=\(\lambda\).
If fast
is True
, then the solution is computed by solving the normal
equations using Cholesky decomposition. Specifically, if \(m \ge n\) then
\(X = (A^T A + \lambda I)^{-1} A^T B\), which solves the least-squares
problem \(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 +
\lambda ||Z||_F^2\). If \(m \lt n\) then output
is computed as
\(X = A^T (A A^T + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the
minimum-norm solution to the under-determined linear system, i.e.
\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||Z||_F^2 \), subject to
\(A Z = B\). Notice that the fast path is only numerically stable when
\(A\) is numerically full rank and has a condition number
\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\) or \(\lambda\) is
sufficiently large.
If fast
is False
an algorithm based on the numerically robust complete
orthogonal decomposition is used. This computes the minimum-norm
least-squares solution, even when \(A\) is rank deficient. This path is
typically 6-7 times slower than the fast path. If fast
is False
then
l2_regularizer
is ignored.
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | matrix: Shape is `[..., M, M]`. |
-> Tensor v'2 t | rhs: Shape is `[..., M, K]`. |
-> Tensor Build t | output: Shape is `[..., M, K]`. |
Solves systems of linear equations with upper or lower triangular matrices by
backsubstitution.
matrix
is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
square matrices. If lower
is True
then the strictly upper triangular part
of each inner-most matrix is assumed to be zero and not accessed.
If lower
is False then the strictly lower triangular part of each inner-most
matrix is assumed to be zero and not accessed.
rhs
is a tensor of shape `[..., M, K]`.
The output is a tensor of shape `[..., M, K]`. If adjoint
is
False
then the innermost matrices in output
satisfy matrix equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
If adjoint
is True
then the innermost matrices in
output
satisfy matrix equations
`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | input: The tensor to reduce. |
-> Tensor v'2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Build t | output: The reduced tensor. |
Computes the maximum of elements across dimensions of a tensor.
Reduces input
along the dimensions given in reduction_indices
. Unless
keep_dims
is true, the rank of the tensor is reduced by 1 for each entry in
reduction_indices
. If keep_dims
is true, the reduced dimensions are
retained with length 1.
:: OneOf `[Word16, Float]` t | |
=> Tensor v'1 t | input: 4-D input to pool over. |
-> Tensor Build t | output: The max pooled output tensor. |
Performs max pooling on the input.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. |
-> Tensor Build t | output: The max pooled output tensor. |
Performs 3D max pooling on the input.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Float | orig_input: The original input tensor. |
-> Tensor v'2 Float | orig_output: The original output tensor. |
-> Tensor v'3 t | grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. |
-> Tensor Build t | output |
Computes gradients of max pooling function.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Float | orig_input: The original input tensor. |
-> Tensor v'2 Float | orig_output: The original output tensor. |
-> Tensor v'3 t | grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. |
-> Tensor Build t | output |
:: OneOf `[Word16, Float]` t | |
=> Tensor v'1 t | orig_input: The original input tensor. |
-> Tensor v'2 t | orig_output: The original output tensor. |
-> Tensor v'3 t | grad: 4-D. Gradients w.r.t. the output of |
-> Tensor Build t | output: Gradients w.r.t. the input to |
Computes gradients of the maxpooling function.
:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) | |
=> Tensor v'1 t | input: The original input. |
-> Tensor v'2 t | grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
output of |
-> Tensor v'3 targmax | argmax: The indices of the maximum values chosen for each output of |
-> Tensor Build t | output: Gradients w.r.t. the input of |
Computes gradients of the maxpooling function.
:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) | |
=> OpParams | |
-> Tensor v'1 t | input: The original input. |
-> Tensor v'2 t | grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
output of |
-> Tensor v'3 targmax | argmax: The indices of the maximum values chosen for each output of |
-> Tensor Build t | output: Gradients w.r.t. the input of |
:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) | |
=> Tensor v'1 t | input: 4-D with shape `[batch, height, width, channels]`. Input to pool over. |
-> (Tensor Build t, Tensor Build targmax) | (output, argmax)
|
Performs max pooling on the input and outputs both max values and indices.
The indices in argmax
are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index
`((b * height + y) * width + x) * channels + c`.
:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, height, width, channels]`. Input to pool over. |
-> (Tensor Build t, Tensor Build targmax) | (output, argmax)
|
:: OneOf `[Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns the max of x and y (i.e. x > y ? x : y) element-wise.
- NOTE*:
Maximum
supports broadcasting. More about broadcasting here
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | input: The tensor to reduce. |
-> Tensor v'2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Build t | output: The reduced tensor. |
Computes the mean of elements across dimensions of a tensor.
Reduces input
along the dimensions given in reduction_indices
. Unless
keep_dims
is true, the rank of the tensor is reduced by 1 for each entry in
reduction_indices
. If keep_dims
is true, the reduced dimensions are
retained with length 1.
:: TensorType t | |
=> [Tensor v'1 t] | inputs: The input tensors, exactly one of which will become available. |
-> (Tensor Build t, Tensor Build Int32) | (output, value_index)
|
Forwards the value of an available tensor from inputs
to output
.
Merge
waits for at least one of the tensors in inputs
to become available.
It is usually combined with Switch
to implement branching.
Merge
forwards the first tensor to become available to output
, and sets
value_index
to its index in inputs
.
:: [Tensor v'1 ByteString] | inputs: Can be of any shape. Each must contain serialized |
-> Tensor Build ByteString | summary: Scalar. Serialized |
Merges summaries.
This op creates a `Summary` protocol buffer that contains the union of all the values in the input summaries.
When the Op is run, it reports an InvalidArgument
error if multiple values
in the summaries to merge use the same tag.
:: OpParams | |
-> [Tensor v'1 ByteString] | inputs: Can be of any shape. Each must contain serialized |
-> Tensor Build ByteString | summary: Scalar. Serialized |
:: MonadBuild m' | |
=> Tensor v'1 ByteString | checkpoint_prefixes: prefixes of V2 checkpoints to merge. |
-> Tensor v'2 ByteString | destination_prefix: scalar. The desired final prefix. Allowed to be the same as one of the checkpoint_prefixes. |
-> m' ControlNode |
V2 format specific: merges the metadata files of sharded checkpoints. The
result is one logical checkpoint, with one physical metadata file and renamed data files.
Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
If delete_old_dirs is true, attempts to delete recursively the dirname of each path in the input checkpoint_prefixes. This is useful when those paths are non user-facing temporary locations.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor v'1 ByteString | checkpoint_prefixes: prefixes of V2 checkpoints to merge. |
-> Tensor v'2 ByteString | destination_prefix: scalar. The desired final prefix. Allowed to be the same as one of the checkpoint_prefixes. |
-> m' ControlNode |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | input: The tensor to reduce. |
-> Tensor v'2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Build t | output: The reduced tensor. |
Computes the minimum of elements across dimensions of a tensor.
Reduces input
along the dimensions given in reduction_indices
. Unless
keep_dims
is true, the rank of the tensor is reduced by 1 for each entry in
reduction_indices
. If keep_dims
is true, the reduced dimensions are
retained with length 1.
:: OneOf `[Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns the min of x and y (i.e. x < y ? x : y) element-wise.
- NOTE*:
Minimum
supports broadcasting. More about broadcasting here
:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) | |
=> Tensor v'1 t | input: The input tensor to be padded. |
-> Tensor v'2 tpaddings | paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of |
-> Tensor Build t | output: The padded tensor. |
Pads a tensor with mirrored values.
This operation pads a input
with mirrored values according to the paddings
you specify. paddings
is an integer tensor with shape `[n, 2]`, where n is
the rank of input
. For each dimension D of input
, `paddings[D, 0]` indicates
how many values to add before the contents of input
in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of input
in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if copy_border
is true
(if false, respectively).
The padded size of each dimension D of the output is:
`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
For example:
```prettyprint
# t
is [[1, 2, 3], [4, 5, 6]].
# paddings
is [[1, 1], [2, 2]].
# mode
is SYMMETRIC.
# rank of t
is 2.
pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
[2, 1, 1, 2, 3, 3, 2]
[5, 4, 4, 5, 6, 6, 5]
[5, 4, 4, 5, 6, 6, 5]]
```
:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) | |
=> Tensor v'1 t | input: The input tensor to be folded. |
-> Tensor v'2 tpaddings | paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of |
-> Tensor Build t | output: The folded tensor. |
Gradient op for MirrorPad
op. This op folds a mirror-padded tensor.
This operation folds the padded areas of input
by MirrorPad
according to the
paddings
you specify. paddings
must be the same as paddings
argument
given to the corresponding MirrorPad
op.
The folded size of each dimension D of the output is:
`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
For example:
```prettyprint
# t
is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# paddings
is [[0, 1], [0, 1]].
# mode
is SYMMETRIC.
# rank of t
is 2.
pad(t, paddings) ==> [[ 1, 5]
[11, 28]]
```
Returns element-wise remainder of division.
- NOTE*:
Mod
supports broadcasting. More about broadcasting here
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns x * y element-wise.
- NOTE*:
Mul
supports broadcasting. More about broadcasting here
:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor v'1 t | logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log probabilities for all classes. |
-> Tensor v'2 Int32 | num_samples: 0-D. Number of independent samples to draw for each row slice. |
-> m' (Tensor Value Int64) | output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` contains the drawn class labels with range `[0, num_classes)`. |
Draws samples from a multinomial distribution.
:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor v'1 t | logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log probabilities for all classes. |
-> Tensor v'2 Int32 | num_samples: 0-D. Number of independent samples to draw for each row slice. |
-> m' (Tensor Value Int64) | output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` contains the drawn class labels with range `[0, num_classes)`. |
:: (MonadBuild m', TensorType key_dtype) | |
=> DataType | value_dtype: Type of the table values. |
-> Tensor v'1 key_dtype | empty_key: The key used to represent empty key buckets internally. Must not be used in insert or lookup operations. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
Creates an empty hash table that uses tensors as the backing store. It uses
"open addressing" with quadratic reprobing to resolve collisions.
This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation.
:: (MonadBuild m', TensorType key_dtype) | |
=> OpParams | |
-> DataType | value_dtype: Type of the table values. |
-> Tensor v'1 key_dtype | empty_key: The key used to represent empty key buckets internally. Must not be used in insert or lookup operations. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
:: MonadBuild m' | |
=> DataType | key_dtype: Type of the table keys. |
-> DataType | value_dtype: Type of the table values. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
Creates an empty hash table.
This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | key_dtype: Type of the table keys. |
-> DataType | value_dtype: Type of the table values. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
:: MonadBuild m' | |
=> DataType | key_dtype: Type of the table keys. |
-> DataType | value_dtype: Type of the table values. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
Creates an empty hash table.
This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a vector. Data can be inserted into the table using the insert operations. It does not support the initialization operation.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | key_dtype: Type of the table keys. |
-> DataType | value_dtype: Type of the table values. |
-> m' (Tensor Ref ByteString) | table_handle: Handle to a table. |
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes numerical negative value element-wise.
I.e., \(y = -x\).
:: MonadBuild m' | |
=> Int64 | num_negative_samples: Number of negative samples per example. |
-> Tensor Ref Float | w_in: input word embedding. |
-> Tensor Ref Float | w_out: output word embedding. |
-> Tensor v'3 Int32 | examples: A vector of word ids. |
-> Tensor v'4 Int32 | labels: A vector of word ids. |
-> Tensor v'5 Float | lr |
-> m' ControlNode |
Training via negative sampling.
:: MonadBuild m' | |
=> OpParams | |
-> Int64 | num_negative_samples: Number of negative samples per example. |
-> Tensor Ref Float | w_in: input word embedding. |
-> Tensor Ref Float | w_out: output word embedding. |
-> Tensor v'3 Int32 | examples: A vector of word ids. |
-> Tensor v'4 Int32 | labels: A vector of word ids. |
-> Tensor v'5 Float | lr |
-> m' ControlNode |
:: TensorType t | |
=> Tensor v'1 t | data: The tensor to be made available to the next iteration. |
-> Tensor Build t | output: The same tensor as `data`. |
Makes its input available to the next iteration.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | data: The tensor to be made available to the next iteration. |
-> Tensor Build t | output: The same tensor as `data`. |
noOp :: forall m'. MonadBuild m' => m' ControlNode
Does nothing. Only useful as a placeholder for control edges.
noOp' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode
:: Tensor v'1 Float | boxes: A 2-D float tensor of shape `[num_boxes, 4]`. |
-> Tensor v'2 Float | scores: A 1-D float tensor of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). |
-> Tensor v'3 Int32 | max_output_size: A scalar integer tensor representing the maximum number of boxes to be selected by non max suppression. |
-> Tensor Build Int32 | selected_indices: A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. |
Greedily selects a subset of bounding boxes in descending order of score,
pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather operation`. For example:
selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices)
:: OpParams | |
-> Tensor v'1 Float | boxes: A 2-D float tensor of shape `[num_boxes, 4]`. |
-> Tensor v'2 Float | scores: A 1-D float tensor of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). |
-> Tensor v'3 Int32 | max_output_size: A scalar integer tensor representing the maximum number of boxes to be selected by non max suppression. |
-> Tensor Build Int32 | selected_indices: A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. |
:: OneOf `[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build Bool | z |
Returns the truth value of (x != y) element-wise.
- NOTE*:
NotEqual
supports broadcasting. More about broadcasting here
:: (TensorType t, OneOf `[Int32, Int64, Word8]` tI) | |
=> Tensor v'1 tI | indices: A tensor of indices. |
-> Tensor v'2 Int32 | depth: A scalar defining the depth of the one hot dimension. |
-> Tensor v'3 t | on_value: A scalar defining the value to fill in output when `indices[j] = i`. |
-> Tensor v'4 t | off_value: A scalar defining the value to fill in output when `indices[j] != i`. |
-> Tensor Build t | output: The one-hot tensor. |
Returns a one-hot tensor.
The locations represented by indices in indices
take value on_value
,
while all other locations take value off_value
.
If the input indices
is rank N
, the output will have rank `N+1`,
The new axis is created at dimension axis
(default: the new axis is
appended at the end).
If indices
is a scalar the output shape will be a vector of length depth
.
If indices
is a vector of length features
, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If indices
is a matrix (batch) with shape `[batch, features]`,
the output shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
Examples =========
Suppose that
``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 axis = -1 ```
Then output is `[4 x 3]`:
```output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ```
Suppose that
``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 axis = 0 ```
Then output is `[3 x 4]`:
```output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ one_hot(1) ``` Suppose that
``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = 0.0 axis = -1 ```
Then output is `[2 x 2 x 3]`:
```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // one_hot(-1) ]```
:: (TensorType t, OneOf `[Int32, Int64, Word8]` tI) | |
=> OpParams | |
-> Tensor v'1 tI | indices: A tensor of indices. |
-> Tensor v'2 Int32 | depth: A scalar defining the depth of the one hot dimension. |
-> Tensor v'3 t | on_value: A scalar defining the value to fill in output when `indices[j] = i`. |
-> Tensor v'4 t | off_value: A scalar defining the value to fill in output when `indices[j] != i`. |
-> Tensor Build t | output: The one-hot tensor. |
:: TensorType t | |
=> [Tensor v'1 t] | values: Must be of same shape and type. |
-> Tensor Build t | output: The packed tensor. |
Packs a list of N
rank-R
tensors into one rank-`(R+1)` tensor.
Packs the N
tensors in values
into a tensor with rank one higher than each
tensor in values
, by packing them along the axis
dimension.
Given a list of tensors of shape `(A, B, C)`;
if `axis == 0` then the output
tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the output
tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```prettyprint
# x
is [1, 4]
# y
is [2, 5]
# z
is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unpack
.
:: TensorType t | |
=> OpParams | |
-> [Tensor v'1 t] | values: Must be of same shape and type. |
-> Tensor Build t | output: The packed tensor. |
:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) | |
=> Tensor v'1 t | input |
-> Tensor v'2 tpaddings | paddings |
-> Tensor Build t | output |
Pads a tensor with zeros.
This operation pads a input
with zeros according to the paddings
you
specify. paddings
is an integer tensor with shape `[Dn, 2]`, where n is the
rank of input
. For each dimension D of input
, `paddings[D, 0]` indicates
how many zeros to add before the contents of input
in that dimension, and
`paddings[D, 1]` indicates how many zeros to add after the contents of input
in that dimension.
The padded size of each dimension D of the output is:
`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
For example:
```prettyprint
# t
is [[1, 1], [2, 2]]
# paddings
is [[1, 1], [2, 2]]
# rank of t
is 2
pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
[0, 0, 1, 1, 0, 0]
[0, 0, 2, 2, 0, 0]
[0, 0, 0, 0, 0, 0]]
```
:: MonadBuild m' | |
=> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
A queue that produces elements in first-in first-out order.
Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum size of any given element in the minibatch. See below for details.
:: MonadBuild m' | |
=> OpParams | |
-> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
:: MonadBuild m' | |
=> [DataType] | component_types: The type of each component in a value. |
-> m' ResourceHandle | handle: The handle to the queue. |
A queue that produces elements in first-in first-out order.
Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum size of any given element in the minibatch. See below for details.
:: MonadBuild m' | |
=> OpParams | |
-> [DataType] | component_types: The type of each component in a value. |
-> m' ResourceHandle | handle: The handle to the queue. |
:: TensorType t | |
=> Shape | shape: the final shape of the result; should be equal to the shapes of any input but with the number of input values in the first dimension. |
-> [Tensor v'1 t] | values: Tensors to be concatenated. All must have size 1 in the first dimension and same shape. |
-> Tensor Build t | output: The concatenated tensor. |
Concatenates a list of N
tensors along the first dimension.
The input tensors are all required to have size 1 in the first dimension.
For example:
```prettyprint
# x
is [[1, 4]]
# y
is [[2, 5]]
# z
is [[3, 6]]
parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
```
The difference between concat and parallel_concat is that concat requires all of the inputs be computed before the operation will begin but doesn't require that the input shapes be known during graph construction. Parallel concat will copy pieces of the input into the output as they become available, in some situations this can provide a performance benefit.
:: TensorType t | |
=> OpParams | |
-> Shape | shape: the final shape of the result; should be equal to the shapes of any input but with the number of input values in the first dimension. |
-> [Tensor v'1 t] | values: Tensors to be concatenated. All must have size 1 in the first dimension and same shape. |
-> Tensor Build t | output: The concatenated tensor. |
:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) | |
=> Tensor v'1 t | shape: The shape of the output tensor. Batches are indexed by the 0th dimension. |
-> Tensor v'2 dtype | means: The mean parameter of each batch. |
-> Tensor v'3 dtype | stdevs: The standard deviation parameter of each batch. Must be greater than 0. |
-> Tensor v'4 dtype | minvals: The minimum cutoff. May be -infinity. |
-> Tensor v'5 dtype | maxvals: The maximum cutoff. May be +infinity, and must be more than the minval for each batch. |
-> m' (Tensor Value dtype) | output: A matrix of shape num_batches x samples_per_batch, filled with random truncated normal values using the parameters for each row. |
Outputs random values from a normal distribution. The parameters may each be a
scalar which applies to the entire output, or a vector of length shape[0] which stores the parameters for each batch.
:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) | |
=> OpParams | |
-> Tensor v'1 t | shape: The shape of the output tensor. Batches are indexed by the 0th dimension. |
-> Tensor v'2 dtype | means: The mean parameter of each batch. |
-> Tensor v'3 dtype | stdevs: The standard deviation parameter of each batch. Must be greater than 0. |
-> Tensor v'4 dtype | minvals: The minimum cutoff. May be -infinity. |
-> Tensor v'5 dtype | maxvals: The maximum cutoff. May be +infinity, and must be more than the minval for each batch. |
-> m' (Tensor Value dtype) | output: A matrix of shape num_batches x samples_per_batch, filled with random truncated normal values using the parameters for each row. |
:: (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) | |
=> Tensor v'1 ByteString | serialized: A vector containing a batch of binary serialized Example protos. |
-> Tensor v'2 ByteString | names: A vector containing the names of the serialized protos. May contain, for example, table key (descriptive) names for the corresponding serialized protos. These are purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no names are available. If non-empty, this vector must be the same length as "serialized". |
-> [Tensor v'3 ByteString] | sparse_keys: A list of Nsparse string Tensors (scalars). The keys expected in the Examples' features associated with sparse values. |
-> [Tensor v'4 ByteString] | dense_keys: A list of Ndense string Tensors (scalars). The keys expected in the Examples' features associated with dense values. |
-> TensorList v'5 tdense | dense_defaults: A list of Ndense Tensors (some may be empty). dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. If dense_defaults[j] is not empty, its shape must match dense_shapes[j]. |
-> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense) | (sparse_indices, sparse_values, sparse_shapes, dense_values)
|
Transforms a vector of brain.Example protos (as strings) into typed tensors.
:: (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) | |
=> OpParams | |
-> Tensor v'1 ByteString | serialized: A vector containing a batch of binary serialized Example protos. |
-> Tensor v'2 ByteString | names: A vector containing the names of the serialized protos. May contain, for example, table key (descriptive) names for the corresponding serialized protos. These are purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no names are available. If non-empty, this vector must be the same length as "serialized". |
-> [Tensor v'3 ByteString] | sparse_keys: A list of Nsparse string Tensors (scalars). The keys expected in the Examples' features associated with sparse values. |
-> [Tensor v'4 ByteString] | dense_keys: A list of Ndense string Tensors (scalars). The keys expected in the Examples' features associated with dense values. |
-> TensorList v'5 tdense | dense_defaults: A list of Ndense Tensors (some may be empty). dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. If dense_defaults[j] is not empty, its shape must match dense_shapes[j]. |
-> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense) | (sparse_indices, sparse_values, sparse_shapes, dense_values)
|
:: (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) | |
=> Tensor v'1 ByteString | serialized: A scalar containing a binary serialized SequenceExample proto. |
-> Tensor v'2 ByteString | feature_list_dense_missing_assumed_empty: A vector listing the FeatureList keys which may be missing from the SequenceExample. If the associated FeatureList is missing, it is treated as empty. By default, any FeatureList not listed in this vector must exist in the SequenceExample. |
-> [Tensor v'3 ByteString] | context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). The keys expected in the Examples' features associated with context_sparse values. |
-> [Tensor v'4 ByteString] | context_dense_keys: A list of Ncontext_dense string Tensors (scalars). The keys expected in the SequenceExamples' context features associated with dense values. |
-> [Tensor v'5 ByteString] | feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors (scalars). The keys expected in the FeatureLists associated with sparse values. |
-> [Tensor v'6 ByteString] | feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). The keys expected in the SequenceExamples' feature_lists associated with lists of dense values. |
-> TensorList v'7 tcontext_dense | context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match context_dense_shapes[j]. |
-> Tensor v'8 ByteString | debug_name: A scalar containing the name of the serialized proto. May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty scalar if no name is available. |
-> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types) | (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)
|
Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
:: (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) | |
=> OpParams | |
-> Tensor v'1 ByteString | serialized: A scalar containing a binary serialized SequenceExample proto. |
-> Tensor v'2 ByteString | feature_list_dense_missing_assumed_empty: A vector listing the FeatureList keys which may be missing from the SequenceExample. If the associated FeatureList is missing, it is treated as empty. By default, any FeatureList not listed in this vector must exist in the SequenceExample. |
-> [Tensor v'3 ByteString] | context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). The keys expected in the Examples' features associated with context_sparse values. |
-> [Tensor v'4 ByteString] | context_dense_keys: A list of Ncontext_dense string Tensors (scalars). The keys expected in the SequenceExamples' context features associated with dense values. |
-> [Tensor v'5 ByteString] | feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors (scalars). The keys expected in the FeatureLists associated with sparse values. |
-> [Tensor v'6 ByteString] | feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). The keys expected in the SequenceExamples' feature_lists associated with lists of dense values. |
-> TensorList v'7 tcontext_dense | context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match context_dense_shapes[j]. |
-> Tensor v'8 ByteString | debug_name: A scalar containing the name of the serialized proto. May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty scalar if no name is available. |
-> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types) | (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)
|
:: TensorType out_type | |
=> Tensor v'1 ByteString | serialized: A scalar string containing a serialized TensorProto proto. |
-> Tensor Build out_type | output: A Tensor of type |
Transforms a serialized tensorflow.TensorProto proto into a Tensor.
:: TensorType out_type | |
=> OpParams | |
-> Tensor v'1 ByteString | serialized: A scalar string containing a serialized TensorProto proto. |
-> Tensor Build out_type | output: A Tensor of type |
:: TensorType dtype | |
=> Tensor Build dtype | output: A placeholder tensor that must be replaced using the feed mechanism. |
A placeholder op for a value that will be fed into the computation.
N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime.
:: TensorType dtype | |
=> OpParams | |
-> Tensor Build dtype | output: A placeholder tensor that must be replaced using the feed mechanism. |
:: TensorType dtype | |
=> Shape | shape: The shape of the tensor. The shape can be any partially-specified shape. To be unconstrained, pass in a shape with unknown rank. |
-> Tensor Build dtype | output: A placeholder tensor that must be replaced using the feed mechanism. |
A placeholder op for a value that will be fed into the computation.
N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime.
:: TensorType dtype | |
=> OpParams | |
-> Shape | shape: The shape of the tensor. The shape can be any partially-specified shape. To be unconstrained, pass in a shape with unknown rank. |
-> Tensor Build dtype | output: A placeholder tensor that must be replaced using the feed mechanism. |
:: TensorType dtype | |
=> Shape | shape: The (possibly partial) shape of the tensor. |
-> Tensor v'1 dtype | input: The default value to produce when |
-> Tensor Build dtype | output: A placeholder tensor that defaults to |
A placeholder op that passes through input
when its output is not fed.
Compute the polygamma function \(\psi^{(n)}(x)\).
The polygamma function is defined as:
\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\), where \(\psi(x)\) is the digamma function.
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Computes the power of one value to another.
Given a tensor x
and a tensor y
, this operation computes \(x^y\) for
corresponding elements in x
and y
. For example:
```
# tensor x
is [[2, 2], [3, 3]]
# tensor y
is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
:: TensorType t | |
=> Tensor v'1 t | input |
-> Tensor Build t | output |
An identity op that triggers an error if a gradient is requested.
When executed in a graph, this op outputs its input tensor as-is.
When building ops to compute gradients, the TensorFlow gradient system will return an error when trying to lookup the gradient of this op, because no gradient must ever be registered for this function. This op exists to prevent subtle bugs from silently returning unimplemented gradients in some corner cases.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor Build t | output |
:: (MonadBuild m', TensorType t, TensorTypes u) | |
=> Tensor v'1 t | input: The tensor passed to |
-> TensorList v'2 u | data: A list of tensors to print out when op is evaluated. |
-> m' (Tensor Value t) | output: = The unmodified |
Prints a list of tensors.
Passes input
through to output
and prints `data` when evaluating.
:: (MonadBuild m', TensorType t, TensorTypes u) | |
=> OpParams | |
-> Tensor v'1 t | input: The tensor passed to |
-> TensorList v'2 u | data: A list of tensors to print out when op is evaluated. |
-> m' (Tensor Value t) | output: = The unmodified |
:: MonadBuild m' | |
=> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
A queue that produces elements sorted by the first component value.
Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra entry in their input (resp. output) lists.
:: MonadBuild m' | |
=> OpParams | |
-> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
:: MonadBuild m' | |
=> m' ResourceHandle | handle: The handle to the queue. |
A queue that produces elements sorted by the first component value.
Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra entry in their input (resp. output) lists.
:: MonadBuild m' | |
=> OpParams | |
-> m' ResourceHandle | handle: The handle to the queue. |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | input: The tensor to reduce. |
-> Tensor v'2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Build t | output: The reduced tensor. |
Computes the product of elements across dimensions of a tensor.
Reduces input
along the dimensions given in reduction_indices
. Unless
keep_dims
is true, the rank of the tensor is reduced by 1 for each entry in
reduction_indices
. If keep_dims
is true, the reduced dimensions are
retained with length 1.
:: OneOf `[Complex Double, Complex Float, Double, Float]` t | |
=> Tensor v'1 t | input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Let |
-> (Tensor Build t, Tensor Build t) | (q, r) |
Computes the QR decompositions of one or more matrices.
Computes the QR decomposition of each inner matrix in tensor
such that
`tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
```prettyprint # a is a tensor. # q is a tensor of orthonormal matrices. # r is a tensor of upper triangular matrices. q, r = qr(a) q_full, r_full = qr(a, full_matrices=True) ```
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | input: Tensor to quantize and then dequantize. |
-> Tensor Build t | output |
Quantizes then dequantizes a tensor.
This op simulates the precision loss from the quantized forward pass by: 1. Quantizing the tensor to fixed point numbers, which should match the target quantization method when it is used in inference. 2. Dequantizing it back to floating point numbers for the following ops, most likely matmul.
There are different ways to quantize. This version does not use the full range of the output type, choosing to elide the lowest possible value for symmetry (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to 0.
To perform this op, we first find the range of values in our tensor. The range we use is always centered on 0, so we find m such that
- m = max(abs(input_min), abs(input_max)) if range_given is true,
- m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
Our input tensor range is then [-m, m].
Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed]. If signed_input is true, this is
[min_fixed, max_fixed] = [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
Otherwise, if signed_input is false, the fixed-point range is
[min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
From this we compute our scaling factor, s:
s = (max_fixed - min_fixed) / (2 * m).
Now we can quantize and dequantize the elements of our tensor. An element e is transformed into e':
e' = (e * s).round_to_nearest() / s.
Note that we have a different number of buckets in the signed vs. unsigned cases. For example, if num_bits == 8, we get 254 buckets in the signed case vs. 255 in the unsigned case.
For example, suppose num_bits = 8 and m = 1. Then
[min_fixed, max_fixed] = [-127, 127], and s = (127 + 127) / 2 = 127.
Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Tensor v'1 tinput | input |
-> Tensor v'2 Float | input_min: The float value that the minimum quantized input value represents. |
-> Tensor v'3 Float | input_max: The float value that the maximum quantized input value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
Convert the quantized input
tensor into a lower-precision output
, using the
actual distribution of the values to maximize the usage of the lower bit depth and adjusting the output min and max ranges accordingly.
`input_min` and `input_max` are scalar floats that specify the range for the float
interpretation of the
input
data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
This operator tries to squeeze as much precision as possible into an output with a lower bit depth by calculating the actual min and max values found in the data. For example, maybe that quint16 input has no values lower than 16,384 and none higher than 49,152. That means only half the range is actually needed, all the float interpretations are between -0.5f and 0.5f, so if we want to compress the data into a quint8 output, we can use that range rather than the theoretical -1.0f to 1.0f that is suggested by the input min and max.
In practice, this is most useful for taking output from operations like QuantizedMatMul that can produce higher bit-depth outputs than their inputs and may have large potential output ranges, but in practice have a distribution of input values that only uses a small fraction of the possible range. By feeding that output into this operator, we can reduce it from 32 bits down to 8 with minimal loss of accuracy.
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Tensor v'1 tinput | input |
-> Tensor v'2 Float | input_min: The float value that the minimum quantized input value represents. |
-> Tensor v'3 Float | input_max: The float value that the maximum quantized input value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> Tensor v'1 Float | input |
-> Tensor v'2 Float | min_range: The minimum scalar value possibly produced for the input. |
-> Tensor v'3 Float | max_range: The maximum scalar value possibly produced for the input. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
Quantize the input
tensor of type float to output
tensor of type T
.
`min_range` and `max_range` are scalar floats that specify the range for
the input data. The `mode`
attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.
In MIN_COMBINED
mode, each value of the tensor will undergo the following:
``` out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) if T == qint8, out[i] -= (range(T) + 1) / 2.0 ``` here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
- MIN_COMBINED Mode Example*
Assume the input is type float and has a possible range of [0.0, 6.0] and the output type is quint8 ([0, 255]). The min_range and max_range values should be specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each value of the input by 255/6 and cast to quint8.
If the output type was qint8 ([-128, 127]), the operation will additionally subtract each value by 128 prior to casting, so that the range of values aligns with the range of qint8.
If the mode is MIN_FIRST
, then this approach is used:
``` number_of_steps = 1 << (# of bits in T) range_adjust = number_of_steps / (number_of_steps - 1) range = (range_max - range_min) * range_adjust range_scale = number_of_steps / range quantized = round(input * range_scale) - round(range_min * range_scale) + numeric_limits<T>::min() quantized = max(quantized, numeric_limits<T>::min()) quantized = min(quantized, numeric_limits<T>::max()) ```
The biggest difference between this and MIN_COMBINED is that the minimum range is rounded first, before it's subtracted from the rounded value. With MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing and dequantizing will introduce a larger and larger error.
One thing to watch out for is that the operator may choose to adjust the requested minimum and maximum values slightly during the quantization process, so you should always use the output ports as the range for further calculations. For example, if the requested minimum and maximum values are close to equal, they will be separated by a small epsilon value to prevent ill-formed quantized buffers from being created. Otherwise, you can end up with buffers where all the quantized values map to the same float value, which causes problems for operations that have to perform further calculations on them.
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 Float | input |
-> Tensor v'2 Float | min_range: The minimum scalar value possibly produced for the input. |
-> Tensor v'3 Float | max_range: The maximum scalar value possibly produced for the input. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> Tensor v'1 t | input: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'3 Float | max_input: The float value that the highest quantized input value represents. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, min_output, max_output)
|
Produces the average pool of the input tensor for quantized types.
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'3 Float | max_input: The float value that the highest quantized input value represents. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, min_output, max_output)
|
quantizedBatchNormWithGlobalNormalization
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Bool | scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. |
-> Float | variance_epsilon: A small float number to avoid dividing by 0. |
-> Tensor v'1 tinput | t: A 4D input Tensor. |
-> Tensor v'2 Float | t_min: The value represented by the lowest quantized input. |
-> Tensor v'3 Float | t_max: The value represented by the highest quantized input. |
-> Tensor v'4 tinput | m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'5 Float | m_min: The value represented by the lowest quantized mean. |
-> Tensor v'6 Float | m_max: The value represented by the highest quantized mean. |
-> Tensor v'7 tinput | v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'8 Float | v_min: The value represented by the lowest quantized variance. |
-> Tensor v'9 Float | v_max: The value represented by the highest quantized variance. |
-> Tensor v'10 tinput | beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. |
-> Tensor v'11 Float | beta_min: The value represented by the lowest quantized offset. |
-> Tensor v'12 Float | beta_max: The value represented by the highest quantized offset. |
-> Tensor v'13 tinput | gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. |
-> Tensor v'14 Float | gamma_min: The value represented by the lowest quantized gamma. |
-> Tensor v'15 Float | gamma_max: The value represented by the highest quantized gamma. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (result, result_min, result_max)
|
Quantized Batch normalization.
This op is deprecated and will be removed in the future. Prefer `tf.nn.batch_normalization`.
quantizedBatchNormWithGlobalNormalization'
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Bool | scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. |
-> Float | variance_epsilon: A small float number to avoid dividing by 0. |
-> Tensor v'1 tinput | t: A 4D input Tensor. |
-> Tensor v'2 Float | t_min: The value represented by the lowest quantized input. |
-> Tensor v'3 Float | t_max: The value represented by the highest quantized input. |
-> Tensor v'4 tinput | m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'5 Float | m_min: The value represented by the lowest quantized mean. |
-> Tensor v'6 Float | m_max: The value represented by the highest quantized mean. |
-> Tensor v'7 tinput | v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. |
-> Tensor v'8 Float | v_min: The value represented by the lowest quantized variance. |
-> Tensor v'9 Float | v_max: The value represented by the highest quantized variance. |
-> Tensor v'10 tinput | beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. |
-> Tensor v'11 Float | beta_min: The value represented by the lowest quantized offset. |
-> Tensor v'12 Float | beta_max: The value represented by the highest quantized offset. |
-> Tensor v'13 tinput | gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. |
-> Tensor v'14 Float | gamma_min: The value represented by the lowest quantized gamma. |
-> Tensor v'15 Float | gamma_max: The value represented by the highest quantized gamma. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (result, result_min, result_max)
|
:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Tensor v'1 t1 | input |
-> Tensor v'2 t2 | bias: A 1D bias Tensor with size matching the last dimension of |
-> Tensor v'3 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'4 Float | max_input: The float value that the highest quantized input value represents. |
-> Tensor v'5 Float | min_bias: The float value that the lowest quantized bias value represents. |
-> Tensor v'6 Float | max_bias: The float value that the highest quantized bias value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, min_out, max_out)
|
Adds Tensor bias
to Tensor input
for Quantized types.
Broadcasts the values of bias on dimensions 0..N-2 of input
.
:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Tensor v'1 t1 | input |
-> Tensor v'2 t2 | bias: A 1D bias Tensor with size matching the last dimension of |
-> Tensor v'3 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'4 Float | max_input: The float value that the highest quantized input value represents. |
-> Tensor v'5 Float | min_bias: The float value that the lowest quantized bias value represents. |
-> Tensor v'6 Float | max_bias: The float value that the highest quantized bias value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, min_out, max_out)
|
:: TensorType t | |
=> Tensor v'1 Int32 | concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)). |
-> [Tensor v'2 t] | values: The |
-> [Tensor v'3 Float] | input_mins: The minimum scalar values for each of the input tensors. |
-> [Tensor v'4 Float] | input_maxes: The maximum scalar values for each of the input tensors. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
Concatenates quantized tensors along one dimension.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 Int32 | concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)). |
-> [Tensor v'2 t] | values: The |
-> [Tensor v'3 Float] | input_mins: The minimum scalar values for each of the input tensors. |
-> [Tensor v'4 Float] | input_maxes: The maximum scalar values for each of the input tensors. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Tensor v'1 tinput | input |
-> Tensor v'2 tfilter | filter: filter's input_depth dimension must match input's depth dimensions. |
-> Tensor v'3 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'4 Float | max_input: The float value that the highest quantized input value represents. |
-> Tensor v'5 Float | min_filter: The float value that the lowest quantized filter value represents. |
-> Tensor v'6 Float | max_filter: The float value that the highest quantized filter value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, min_output, max_output)
|
Computes a 2D convolution given quantized 4D input and filter tensors.
The inputs are quantized tensors where the lowest value represents the real number of the associated minimum, and the highest represents the maximum. This means that you can only interpret the quantized output in the same way, by taking the returned minimum and maximum values into account.
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Tensor v'1 tinput | input |
-> Tensor v'2 tfilter | filter: filter's input_depth dimension must match input's depth dimensions. |
-> Tensor v'3 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'4 Float | max_input: The float value that the highest quantized input value represents. |
-> Tensor v'5 Float | min_filter: The float value that the lowest quantized filter value represents. |
-> Tensor v'6 Float | max_filter: The float value that the highest quantized filter value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, min_output, max_output)
|
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> Tensor v'1 t | x: A 4D input Tensor. |
-> Tensor v'2 Float | x_min: The value represented by the lowest quantized input. |
-> Tensor v'3 Float | x_max: The value represented by the highest quantized input. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (y, y_min, y_max)
|
Quantized Instance normalization.
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 t | x: A 4D input Tensor. |
-> Tensor v'2 Float | x_min: The value represented by the lowest quantized input. |
-> Tensor v'3 Float | x_max: The value represented by the highest quantized input. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (y, y_min, y_max)
|
:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) | |
=> Tensor v'1 t1 | a: Must be a two-dimensional tensor. |
-> Tensor v'2 t2 | b: Must be a two-dimensional tensor. |
-> Tensor v'3 Float | min_a: The float value that the lowest quantized |
-> Tensor v'4 Float | max_a: The float value that the highest quantized |
-> Tensor v'5 Float | min_b: The float value that the lowest quantized |
-> Tensor v'6 Float | max_b: The float value that the highest quantized |
-> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) | (out, min_out, max_out)
|
Perform a quantized matrix multiplication of a
by the matrix b
.
The inputs must be two-dimensional matrices and the inner dimension of
a
(after being transposed if transpose_a
is non-zero) must match the
outer dimension of b
(after being transposed if transpose_b
is
non-zero).
:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) | |
=> OpParams | |
-> Tensor v'1 t1 | a: Must be a two-dimensional tensor. |
-> Tensor v'2 t2 | b: Must be a two-dimensional tensor. |
-> Tensor v'3 Float | min_a: The float value that the lowest quantized |
-> Tensor v'4 Float | max_a: The float value that the highest quantized |
-> Tensor v'5 Float | min_b: The float value that the lowest quantized |
-> Tensor v'6 Float | max_b: The float value that the highest quantized |
-> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) | (out, min_out, max_out)
|
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> Tensor v'1 t | input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. |
-> Tensor v'2 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'3 Float | max_input: The float value that the highest quantized input value represents. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, min_output, max_output)
|
Produces the max pool of the input tensor for quantized types.
:: OneOf `[Int16, Int32, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 t | input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. |
-> Tensor v'2 Float | min_input: The float value that the lowest quantized input value represents. |
-> Tensor v'3 Float | max_input: The float value that the highest quantized input value represents. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, min_output, max_output)
|
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Tensor v'1 tinput | features |
-> Tensor v'2 Float | min_features: The float value that the lowest quantized value represents. |
-> Tensor v'3 Float | max_features: The float value that the highest quantized value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (activations, min_activations, max_activations)
|
Computes Quantized Rectified Linear: `max(features, 0)`
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Tensor v'1 tinput | features |
-> Tensor v'2 Float | min_features: The float value that the lowest quantized value represents. |
-> Tensor v'3 Float | max_features: The float value that the highest quantized value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (activations, min_activations, max_activations)
|
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Tensor v'1 tinput | features |
-> Tensor v'2 Float | min_features: The float value that the lowest quantized value represents. |
-> Tensor v'3 Float | max_features: The float value that the highest quantized value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (activations, min_activations, max_activations)
|
Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Tensor v'1 tinput | features |
-> Tensor v'2 Float | min_features: The float value that the lowest quantized value represents. |
-> Tensor v'3 Float | max_features: The float value that the highest quantized value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (activations, min_activations, max_activations)
|
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Tensor v'1 tinput | features |
-> Tensor v'2 Float | max_value |
-> Tensor v'3 Float | min_features: The float value that the lowest quantized value represents. |
-> Tensor v'4 Float | max_features: The float value that the highest quantized value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (activations, min_activations, max_activations)
|
Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Tensor v'1 tinput | features |
-> Tensor v'2 Float | max_value |
-> Tensor v'3 Float | min_features: The float value that the lowest quantized value represents. |
-> Tensor v'4 Float | max_features: The float value that the highest quantized value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (activations, min_activations, max_activations)
|
:: (TensorType t, OneOf `[Int32, Int64]` tshape) | |
=> Tensor v'1 t | tensor |
-> Tensor v'2 tshape | shape: Defines the shape of the output tensor. |
-> Tensor v'3 Float | input_min: The minimum value of the input. |
-> Tensor v'4 Float | input_max: The maximum value of the input. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
Reshapes a quantized tensor as per the Reshape op.
```
:: (TensorType t, OneOf `[Int32, Int64]` tshape) | |
=> OpParams | |
-> Tensor v'1 t | tensor |
-> Tensor v'2 tshape | shape: Defines the shape of the output tensor. |
-> Tensor v'3 Float | input_min: The minimum value of the input. |
-> Tensor v'4 Float | input_max: The maximum value of the input. |
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to a queue. |
-> m' ControlNode |
Closes the given queue.
This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a queue. |
-> m' ControlNode |
:: MonadBuild m' | |
=> ResourceHandle | handle: The handle to a queue. |
-> m' ControlNode |
Closes the given queue.
This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a queue. |
-> m' ControlNode |
:: (MonadBuild m', TensorTypes component_types) | |
=> Tensor Ref ByteString | handle: The handle to a queue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
Dequeues a tuple of one or more tensors from the given queue.
This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until an element
has been dequeued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes component_types) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a queue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
:: (MonadBuild m', TensorTypes component_types) | |
=> Tensor Ref ByteString | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
Dequeues n tuples of one or more tensors from the given queue.
If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.
This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.
This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until n elements
have been dequeued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes component_types) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
:: (MonadBuild m', TensorTypes component_types) | |
=> ResourceHandle | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
Dequeues n tuples of one or more tensors from the given queue.
If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.
This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.
This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until n elements
have been dequeued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes component_types) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
:: (MonadBuild m', TensorTypes component_types) | |
=> Tensor Ref ByteString | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
Dequeues n tuples of one or more tensors from the given queue.
This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.
If the queue is closed and there are more than 0 but less than n elements
remaining, then instead of returning an OutOfRange error like
QueueDequeueMany, less than n
elements are returned immediately. If the queue
is closed and there are 0 elements left in the queue, then an OutOfRange
error is returned just like in QueueDequeueMany. Otherwise the behavior
is identical to QueueDequeueMany:
This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.
This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.
:: (MonadBuild m', TensorTypes component_types) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
:: (MonadBuild m', TensorTypes component_types) | |
=> ResourceHandle | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
Dequeues n tuples of one or more tensors from the given queue.
This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.
If the queue is closed and there are more than 0 but less than n elements
remaining, then instead of returning an OutOfRange error like
QueueDequeueMany, less than n
elements are returned immediately. If the queue
is closed and there are 0 elements left in the queue, then an OutOfRange
error is returned just like in QueueDequeueMany. Otherwise the behavior
is identical to QueueDequeueMany:
This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.
This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.
:: (MonadBuild m', TensorTypes component_types) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a queue. |
-> Tensor v'2 Int32 | n: The number of tuples to dequeue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
:: (MonadBuild m', TensorTypes component_types) | |
=> ResourceHandle | handle: The handle to a queue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
Dequeues a tuple of one or more tensors from the given queue.
This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until an element
has been dequeued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes component_types) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a queue. |
-> m' (TensorList Value component_types) | components: One or more tensors that were dequeued as a tuple. |
:: (MonadBuild m', TensorTypes tcomponents) | |
=> Tensor Ref ByteString | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
Enqueues a tuple of one or more tensors in the given queue.
The components input has k elements, which correspond to the components of tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
element has been enqueued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes tcomponents) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
:: (MonadBuild m', TensorTypes tcomponents) | |
=> Tensor Ref ByteString | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
Enqueues zero or more tuples of one or more tensors in the given queue.
This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.
The components input has k elements, which correspond to the components of tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
elements have been enqueued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes tcomponents) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
:: (MonadBuild m', TensorTypes tcomponents) | |
=> ResourceHandle | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
Enqueues zero or more tuples of one or more tensors in the given queue.
This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.
The components input has k elements, which correspond to the components of tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
elements have been enqueued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes tcomponents) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
:: (MonadBuild m', TensorTypes tcomponents) | |
=> ResourceHandle | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
Enqueues a tuple of one or more tensors in the given queue.
The components input has k elements, which correspond to the components of tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
element has been enqueued (or timeout_ms
elapses, if specified).
:: (MonadBuild m', TensorTypes tcomponents) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a queue. |
-> TensorList v'2 tcomponents | components: One or more tensors from which the enqueued tensors should be taken. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to a queue. |
-> m' (Tensor Value Int32) | size: The number of elements in the given queue. |
Computes the number of elements in the given queue.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a queue. |
-> m' (Tensor Value Int32) | size: The number of elements in the given queue. |
:: MonadBuild m' | |
=> ResourceHandle | handle: The handle to a queue. |
-> m' (Tensor Value Int32) | size: The number of elements in the given queue. |
Computes the number of elements in the given queue.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a queue. |
-> m' (Tensor Value Int32) | size: The number of elements in the given queue. |
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3. |
-> Tensor Build t | output: |
Converts one or more images from RGB to HSV.
Outputs a tensor of the same shape as the images
tensor, containing the HSV
value of the pixels. The output is only well defined if the value in images
are in `[0,1]`.
`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) | |
=> Tensor v'1 t | image: 3-D of shape `[height, width, channels]`. |
-> Tensor v'2 Int64 | size: 1-D of length 2 containing: |
-> m' (Tensor Value t) | output: 3-D of shape `[crop_height, crop_width, channels].` |
Randomly crop image
.
size
is a 1-D int64 tensor with 2 elements representing the crop height and
width. The values must be non negative.
This Op picks a random location in image
and crops a height
by width
rectangle from that location. The random location is picked so the cropped
area will fit inside the original image.
:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) | |
=> OpParams | |
-> Tensor v'1 t | image: 3-D of shape `[height, width, channels]`. |
-> Tensor v'2 Int64 | size: 1-D of length 2 containing: |
-> m' (Tensor Value t) | output: 3-D of shape `[crop_height, crop_width, channels].` |
:: (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) | |
=> Tensor v'1 s | shape: 1-D integer tensor. Shape of independent samples to draw from each distribution described by the shape parameters given in alpha. |
-> Tensor v'2 t | alpha: A tensor in which each scalar is a "shape" parameter describing the associated gamma distribution. |
-> m' (Tensor Value t) | output: A tensor with shape `shape + shape(alpha)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha. |
Outputs random values from the Gamma distribution(s) described by alpha.
This op uses the algorithm by Marsaglia et al. to acquire samples via transformation-rejection from pairs of uniform and normal random variables. See http://dl.acm.org/citation.cfm?id=358414
:: (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) | |
=> OpParams | |
-> Tensor v'1 s | shape: 1-D integer tensor. Shape of independent samples to draw from each distribution described by the shape parameters given in alpha. |
-> Tensor v'2 t | alpha: A tensor in which each scalar is a "shape" parameter describing the associated gamma distribution. |
-> m' (Tensor Value t) | output: A tensor with shape `shape + shape(alpha)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha. |
:: (MonadBuild m', TensorType t) | |
=> Tensor v'1 t | value: The tensor to be shuffled. |
-> m' (Tensor Value t) | output: A tensor of same shape and type as |
Randomly shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is:
```prettyprint
[[1, 2],       [[5, 6],
 [3, 4],  ==>   [1, 2],
 [5, 6]]        [3, 4]]
```
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor v'1 t | value: The tensor to be shuffled. |
-> m' (Tensor Value t) | output: A tensor of same shape and type as |
:: MonadBuild m' | |
=> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
A queue that randomizes the order of elements.
:: MonadBuild m' | |
=> OpParams | |
-> [DataType] | component_types: The type of each component in a value. |
-> m' (Tensor Ref ByteString) | handle: The handle to the queue. |
:: MonadBuild m' | |
=> [DataType] | component_types: The type of each component in a value. |
-> m' ResourceHandle | handle: The handle to the queue. |
A queue that randomizes the order of elements.
:: MonadBuild m' | |
=> OpParams | |
-> [DataType] | component_types: The type of each component in a value. |
-> m' ResourceHandle | handle: The handle to the queue. |
:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) | |
=> Tensor v'1 t | shape: The shape of the output tensor. |
-> m' (Tensor Value dtype) | output: A tensor of the specified shape filled with random normal values. |
Outputs random values from a normal distribution.
The generated values will have mean 0 and standard deviation 1.
:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) | |
=> Tensor v'1 t | shape: The shape of the output tensor. |
-> m' (Tensor Value dtype) | output: A tensor of the specified shape filled with uniform random values. |
Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.
:: (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) | |
=> Tensor v'1 t | shape: The shape of the output tensor. |
-> Tensor v'2 tout | minval: 0-D. Inclusive lower bound on the generated integers. |
-> Tensor v'3 tout | maxval: 0-D. Exclusive upper bound on the generated integers. |
-> m' (Tensor Value tout) | output: A tensor of the specified shape filled with uniform random integers. |
Outputs random integers from a uniform distribution.
The generated values are uniform integers in the range `[minval, maxval)`.
The lower bound minval
is included in the range, while the upper bound
maxval
is excluded.
The random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2^32` or `2^64`).
:: (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) | |
=> OpParams | |
-> Tensor v'1 t | shape: The shape of the output tensor. |
-> Tensor v'2 tout | minval: 0-D. Inclusive lower bound on the generated integers. |
-> Tensor v'3 tout | maxval: 0-D. Exclusive upper bound on the generated integers. |
-> m' (Tensor Value tout) | output: A tensor of the specified shape filled with uniform random integers. |
:: OneOf `[Int32, Int64, Double, Float]` tidx | |
=> Tensor v'1 tidx | start: 0-D (scalar). First entry in the sequence. |
-> Tensor v'2 tidx | limit: 0-D (scalar). Upper limit of sequence, exclusive. |
-> Tensor v'3 tidx | delta: 0-D (scalar). Optional. Default is 1. Number that increments |
-> Tensor Build tidx | output: 1-D. |
Creates a sequence of numbers.
This operation creates a sequence of numbers that begins at `start` and extends by increments of `delta` up to but not including `limit`.
For example:
```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
```
:: OneOf `[Int32, Int64, Double, Float]` tidx | |
=> OpParams | |
-> Tensor v'1 tidx | start: 0-D (scalar). First entry in the sequence. |
-> Tensor v'2 tidx | limit: 0-D (scalar). Upper limit of sequence, exclusive. |
-> Tensor v'3 tidx | delta: 0-D (scalar). Optional. Default is 1. Number that increments |
-> Tensor Build tidx | output: 1-D. |
:: TensorType t | |
=> Tensor v'1 t | input |
-> Tensor Build Int32 | output |
Returns the rank of a tensor.
This operation returns an integer representing the rank of `input`.
For example:
```prettyprint
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor 't' is [2, 2, 3]
rank(t) ==> 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as "order", "degree", or "ndims."
:: Tensor v'1 ByteString | filename |
-> Tensor Build ByteString | contents |
Reads and outputs the entire contents of the input filename.
:: OpParams | |
-> Tensor v'1 ByteString | filename |
-> Tensor Build ByteString | contents |
:: (MonadBuild m', TensorType dtype) | |
=> ResourceHandle | resource: handle to the resource in which to store the variable. |
-> m' (Tensor Value dtype) | value |
Reads the value of a variable.
The tensor returned by this operation is immutable.
The value returned by this operation is guaranteed to be influenced by all the writes on which this operation depends directly or indirectly, and to not be influenced by any of the writes which depend directly or indirectly on this operation.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> ResourceHandle | resource: handle to the resource in which to store the variable. |
-> m' (Tensor Value dtype) | value |
:: MonadBuild m' | |
=> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | records_produced |
Returns the number of records this Reader has produced.
This is the same as the number of ReaderRead executions that have succeeded.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | records_produced |
:: MonadBuild m' | |
=> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | records_produced |
Returns the number of records this Reader has produced.
This is the same as the number of ReaderRead executions that have succeeded.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | records_produced |
:: MonadBuild m' | |
=> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | units_completed |
Returns the number of work units this Reader has finished processing.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | units_completed |
:: MonadBuild m' | |
=> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | units_completed |
Returns the number of work units this Reader has finished processing.
readerNumWorkUnitsCompletedV2'
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' (Tensor Value Int64) | units_completed |
:: MonadBuild m' | |
=> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> Tensor Ref ByteString | queue_handle: Handle to a Queue, with string work items. |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (key, value)
|
Returns the next record (key, value pair) produced by a Reader.
Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> Tensor Ref ByteString | queue_handle: Handle to a Queue, with string work items. |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (key, value)
|
:: MonadBuild m' | |
=> Tensor Ref ByteString | reader_handle: Handle to a |
-> Tensor Ref ByteString | queue_handle: Handle to a |
-> Tensor v'3 Int64 | num_records: number of records to read from |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (keys, values)
|
Returns up to `num_records` (key, value) pairs produced by a Reader.
Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return fewer than `num_records` even before the last batch.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | reader_handle: Handle to a |
-> Tensor Ref ByteString | queue_handle: Handle to a |
-> Tensor v'3 Int64 | num_records: number of records to read from |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (keys, values)
|
:: MonadBuild m' | |
=> ResourceHandle | reader_handle: Handle to a |
-> ResourceHandle | queue_handle: Handle to a |
-> Tensor v'3 Int64 | num_records: number of records to read from |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (keys, values)
|
Returns up to `num_records` (key, value) pairs produced by a Reader.
Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return fewer than `num_records` even before the last batch.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | reader_handle: Handle to a |
-> ResourceHandle | queue_handle: Handle to a |
-> Tensor v'3 Int64 | num_records: number of records to read from |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (keys, values)
|
:: MonadBuild m' | |
=> ResourceHandle | reader_handle: Handle to a Reader. |
-> ResourceHandle | queue_handle: Handle to a Queue, with string work items. |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (key, value)
|
Returns the next record (key, value pair) produced by a Reader.
Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | reader_handle: Handle to a Reader. |
-> ResourceHandle | queue_handle: Handle to a Queue, with string work items. |
-> m' (Tensor Value ByteString, Tensor Value ByteString) | (key, value)
|
:: MonadBuild m' | |
=> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' ControlNode |
Restore a Reader to its initial clean state.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' ControlNode |
:: MonadBuild m' | |
=> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' ControlNode |
Restore a Reader to its initial clean state.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> Tensor v'2 ByteString | state: Result of a ReaderSerializeState of a Reader with type matching reader_handle. |
-> m' ControlNode |
Restore a reader to a previously saved state.
Not all Readers support being restored, so this can produce an Unimplemented error.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> Tensor v'2 ByteString | state: Result of a ReaderSerializeState of a Reader with type matching reader_handle. |
-> m' ControlNode |
:: MonadBuild m' | |
=> ResourceHandle | reader_handle: Handle to a Reader. |
-> Tensor v'2 ByteString | state: Result of a ReaderSerializeState of a Reader with type matching reader_handle. |
-> m' ControlNode |
Restore a reader to a previously saved state.
Not all Readers support being restored, so this can produce an Unimplemented error.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | reader_handle: Handle to a Reader. |
-> Tensor v'2 ByteString | state: Result of a ReaderSerializeState of a Reader with type matching reader_handle. |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' (Tensor Value ByteString) | state |
Produce a string tensor that encodes the state of a Reader.
Not all Readers support being serialized, so this can produce an Unimplemented error.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | reader_handle: Handle to a Reader. |
-> m' (Tensor Value ByteString) | state |
:: MonadBuild m' | |
=> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' (Tensor Value ByteString) | state |
Produce a string tensor that encodes the state of a Reader.
Not all Readers support being serialized, so this can produce an Unimplemented error.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | reader_handle: Handle to a Reader. |
-> m' (Tensor Value ByteString) | state |
:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) | |
=> Tensor v'1 t | input |
-> Tensor Build tout | output |
Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of type `float` that is the real part of each element in `input`. All elements in `input` must be complex numbers of the form \(a + bj\), where *a* is the real part returned by this operation and *b* is the imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns x / y element-wise for real types.
If `x` and `y` are reals, this will return the floating-point division.
*NOTE*: `Div` supports broadcasting. More about broadcasting here.
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes the reciprocal of x element-wise.
I.e., \(y = 1 / x\).
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Computes the gradient for the inverse of `x` wrt its input.
Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` is the corresponding input gradient.
:: MonadBuild m' | |
=> m' (Tensor Value ByteString) | records: A tensor of shape [batch_size]. |
Emits randomized records.
:: MonadBuild m' | |
=> OpParams | |
-> m' (Tensor Value ByteString) | records: A tensor of shape [batch_size]. |
:: Tensor v'1 ByteString | inputs: The input to be joined. All reduced indices must have non-zero size. |
-> Tensor v'2 Int32 | reduction_indices: The dimensions to reduce over. Dimensions are reduced in the
order specified. Omitting |
-> Tensor Build ByteString | output: Has shape equal to that of the input with reduced dimensions removed or
set to `1` depending on |
Joins a string Tensor across the given dimensions.
Computes the string join across dimensions in the given string Tensor of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`.
For example:
```
# tensor 'a' is [["a", "b"], ["c", "d"]]
tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
tf.reduce_join(a, [0, 1]) ==> ["acbd"]
tf.reduce_join(a, [1, 0]) ==> ["abcd"]
tf.reduce_join(a, []) ==> ["abcd"]
```
:: OpParams | |
-> Tensor v'1 ByteString | inputs: The input to be joined. All reduced indices must have non-zero size. |
-> Tensor v'2 Int32 | reduction_indices: The dimensions to reduce over. Dimensions are reduced in the
order specified. Omitting |
-> Tensor Build ByteString | output: Has shape equal to that of the input with reduced dimensions removed or
set to `1` depending on |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref t | data: The tensor to be made available to the child frame. |
-> m' (Tensor Ref t) | output: The same tensor as `data`. |
Creates or finds a child frame, and makes `data` available to the child frame.
The unique `frame_name` is used by the `Executor` to identify frames. If `is_constant` is true, `output` is a constant in the child frame; otherwise it may be changed in the child frame. At most `parallel_iterations` iterations are run in parallel in the child frame.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref t | data: The tensor to be made available to the child frame. |
-> m' (Tensor Ref t) | output: The same tensor as `data`. |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref t | data: The tensor to be made available to the parent frame. |
-> m' (Tensor Ref t) | output: The same tensor as `data`. |
Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref t | data: The tensor to be made available to the parent frame. |
-> m' (Tensor Ref t) | output: The same tensor as `data`. |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref t | input |
-> m' (Tensor Ref t) | output |
Return the same ref tensor as the input ref tensor.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref t | input |
-> m' (Tensor Ref t) | output |
:: (MonadBuild m', TensorType t) | |
=> [Tensor Ref t] | inputs: The input tensors, exactly one of which will become available. |
-> m' (Tensor Ref t, Tensor Value Int32) | (output, value_index)
|
Forwards the value of an available tensor from `inputs` to `output`.
`Merge` waits for at least one of the tensors in `inputs` to become available. It is usually combined with `Switch` to implement branching.
`Merge` forwards the first tensor to become available to `output`, and sets `value_index` to its index in `inputs`.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> [Tensor Ref t] | inputs: The input tensors, exactly one of which will become available. |
-> m' (Tensor Ref t, Tensor Value Int32) | (output, value_index)
|
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref t | data: The tensor to be made available to the next iteration. |
-> m' (Tensor Ref t) | output: The same tensor as `data`. |
Makes its input available to the next iteration.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref t | data: The tensor to be made available to the next iteration. |
-> m' (Tensor Ref t) | output: The same tensor as `data`. |
:: (MonadBuild m', TensorType t) | |
=> Tensor v'1 Int32 | index: A scalar that determines the input that gets selected. |
-> [Tensor Ref t] | inputs: A list of ref tensors, one of which will be forwarded to |
-> m' (Tensor Ref t) | output: The forwarded tensor. |
Forwards the `index`th element of `inputs` to `output`.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor v'1 Int32 | index: A scalar that determines the input that gets selected. |
-> [Tensor Ref t] | inputs: A list of ref tensors, one of which will be forwarded to |
-> m' (Tensor Ref t) | output: The forwarded tensor. |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref t | data: The ref tensor to be forwarded to the appropriate output. |
-> Tensor v'2 Bool | pred: A scalar that specifies which output port will receive data. |
-> m' (Tensor Ref t, Tensor Ref t) | (output_false, output_true) |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | features |
-> Tensor Build t | activations |
Computes rectified linear: `max(features, 0)`.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | features |
-> Tensor Build t | activations |
Computes rectified linear 6: `min(max(features, 0), 6)`.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding Relu6 operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding Relu6 operation. |
-> Tensor Build t | backprops: The gradients: `gradients * (features > 0) * (features < 6)`. |
Computes rectified linear 6 gradients for a Relu6 operation.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding Relu6 operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding Relu6 operation. |
-> Tensor Build t | backprops: The gradients: `gradients * (features > 0) * (features < 6)`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding Relu operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently). |
-> Tensor Build t | backprops: `gradients * (features > 0)`. |
Computes rectified linear gradients for a Relu operation.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding Relu operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently). |
-> Tensor Build t | backprops: `gradients * (features > 0)`. |
:: OneOf `[Int16, Int32, Word16, Word8]` tinput | |
=> Tensor v'1 tinput | input |
-> Tensor v'2 Float | input_min: The float value that the minimum quantized input value represents. |
-> Tensor v'3 Float | input_max: The float value that the maximum quantized input value represents. |
-> (Tensor Build Float, Tensor Build Float) | (output_min, output_max)
|
Given a quantized tensor described by (input, input_min, input_max), outputs a
range that covers the actual values present in that tensor. This op is typically used to produce the requested_output_min and requested_output_max for Requantize.
:: OneOf `[Int16, Int32, Word16, Word8]` tinput | |
=> OpParams | |
-> Tensor v'1 tinput | input |
-> Tensor v'2 Float | input_min: The float value that the minimum quantized input value represents. |
-> Tensor v'3 Float | input_max: The float value that the maximum quantized input value represents. |
-> (Tensor Build Float, Tensor Build Float) | (output_min, output_max)
|
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> Tensor v'1 tinput | input |
-> Tensor v'2 Float | input_min: The float value that the minimum quantized input value represents. |
-> Tensor v'3 Float | input_max: The float value that the maximum quantized input value represents. |
-> Tensor v'4 Float | requested_output_min: The float value that the minimum quantized output value represents. |
-> Tensor v'5 Float | requested_output_max: The float value that the maximum quantized output value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
Convert the quantized `input` tensor into a lower-precision `output`, using the output range specified with `requested_output_min` and `requested_output_max`.
`[input_min, input_max]` are scalar floats that specify the range for the float interpretation of the `input` data. For example, if `input_min` is -1.0f and `input_max` is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) | |
=> OpParams | |
-> Tensor v'1 tinput | input |
-> Tensor v'2 Float | input_min: The float value that the minimum quantized input value represents. |
-> Tensor v'3 Float | input_max: The float value that the maximum quantized input value represents. |
-> Tensor v'4 Float | requested_output_min: The float value that the minimum quantized output value represents. |
-> Tensor v'5 Float | requested_output_max: The float value that the maximum quantized output value represents. |
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) | (output, output_min, output_max)
|
:: (TensorType t, OneOf `[Int32, Int64]` tshape) | |
=> Tensor v'1 t | tensor |
-> Tensor v'2 tshape | shape: Defines the shape of the output tensor. |
-> Tensor Build t | output |
Reshapes a tensor.
Given `tensor`, this operation returns a tensor that has the same values as `tensor` with shape `shape`.
If one component of `shape` is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.
If `shape` is 1-D or higher, then the operation returns a tensor with shape `shape` filled with the values of `tensor`. In this case, the number of elements implied by `shape` must be the same as the number of elements in `tensor`.
For example:
```prettyprint
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor 't' is [[[1, 1], [2, 2]],
#                [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor 't' is [[[1, 1, 1],
#                 [2, 2, 2]],
#                [[3, 3, 3],
#                 [4, 4, 4]],
#                [[5, 5, 5],
#                 [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape
# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [2, -1, 3]) ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
                            [[4, 4, 4], [5, 5, 5], [6, 6, 6]]]

# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build Float | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
Resize `images` to `size` using area interpolation.
Input images can be of different types but output images are always float.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build Float | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build Float | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
Resize `images` to `size` using bicubic interpolation.
Input images can be of different types but output images are always float.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build Float | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build Float | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
Resize `images` to `size` using bilinear interpolation.
Input images can be of different types but output images are always float.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build Float | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 Float | grads: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 t | original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, The image tensor that was resized. |
-> Tensor Build t | output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double. |
Computes the gradient of bilinear interpolation.
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Float | grads: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 t | original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, The image tensor that was resized. |
-> Tensor Build t | output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build t | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
Resize `images` to `size` using nearest neighbor interpolation.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | images: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. |
-> Tensor Build t | resized_images: 4-D with shape `[batch, new_height, new_width, channels]`. |
:: OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | grads: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The original input size. |
-> Tensor Build t | output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. |
Computes the gradient of nearest neighbor interpolation.
:: OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | grads: 4-D with shape `[batch, height, width, channels]`. |
-> Tensor v'2 Int32 | size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The original input size. |
-> Tensor Build t | output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> m' ControlNode |
Update '*var' according to the adadelta scheme.
accum = rho() * accum + (1 - rho()) * grad.square();
update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
update_accum = rho() * update_accum + (1 - rho()) * update.square();
var -= update;
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> m' ControlNode |
Update '*var' according to the adagrad scheme.
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | gradient_accumulator: Should be from a Variable(). |
-> ResourceHandle | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 Int64 | global_step: Training step number. Must be a scalar. |
-> m' ControlNode |
Update '*var' according to the proximal adagrad scheme.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | gradient_accumulator: Should be from a Variable(). |
-> ResourceHandle | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 Int64 | global_step: Training step number. Must be a scalar. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | m: Should be from a Variable(). |
-> ResourceHandle | v: Should be from a Variable(). |
-> Tensor v'4 t | beta1_power: Must be a scalar. |
-> Tensor v'5 t | beta2_power: Must be a scalar. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | beta1: Momentum factor. Must be a scalar. |
-> Tensor v'8 t | beta2: Momentum factor. Must be a scalar. |
-> Tensor v'9 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'10 t | grad: The gradient. |
-> m' ControlNode |
Update '*var' according to the Adam algorithm.
lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | m: Should be from a Variable(). |
-> ResourceHandle | v: Should be from a Variable(). |
-> Tensor v'4 t | beta1_power: Must be a scalar. |
-> Tensor v'5 t | beta2_power: Must be a scalar. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | beta1: Momentum factor. Must be a scalar. |
-> Tensor v'8 t | beta2: Momentum factor. Must be a scalar. |
-> Tensor v'9 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'10 t | grad: The gradient. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | mg: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> m' ControlNode |
Update '*var' according to the centered RMSProp algorithm.
The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.
Note that in dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2 mean_grad = decay * mean_grad + (1-decay) * gradient
Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) var <- var - mom
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | mg: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 t | lr_power: Scaling factor. Must be a scalar. |
-> m' ControlNode |
Update '*var' according to the Ftrl-proximal scheme.
accum_new = accum + grad * grad linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'7 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'8 t | lr_power: Scaling factor. Must be a scalar. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | delta: The change. |
-> m' ControlNode |
Update '*var' by subtracting alpha
* delta
from it.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | momentum: Momentum. Must be a scalar. |
-> m' ControlNode |
Update '*var' according to the momentum scheme. Set use_nesterov = True if you
want to use Nesterov momentum.
accum = accum * momentum + grad var -= lr * accum
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 t | momentum: Momentum. Must be a scalar. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> m' ControlNode |
Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> m' ControlNode |
resourceApplyProximalGradientDescent
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | delta: The change. |
-> m' ControlNode |
Update '*var' as FOBOS algorithm with fixed learning rate.
prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
resourceApplyProximalGradientDescent'
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | delta: The change. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> m' ControlNode |
Update '*var' according to the RMSProp algorithm.
Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> m' ControlNode |
:: (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | resource |
-> Tensor v'2 tindices | indices |
-> m' (Tensor Value dtype) | output |
Gather slices from the variable pointed to by resource
according to indices
.
indices
must be an integer tensor of any dimension (usually 0-D or 1-D).
Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
```python # Scalar indices output[:, ..., :] = params[indices, :, ... :]
# Vector indices output[i, :, ..., :] = params[indices[i], :, ... :]
# Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] ```
:: (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | resource |
-> Tensor v'2 tindices | indices |
-> m' (Tensor Value dtype) | output |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | resource: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 dtype | updates: A tensor of updated values to add to |
-> m' ControlNode |
Adds sparse updates to the variable referenced by resource
.
This operation computes
# Scalar indices ref[indices, ...] += updates[...]
# Vector indices (for each i) ref[indices[i], ...] += updates[i, ...]
# High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
Duplicate entries are handled correctly: if multiple indices
reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
(See the illustration: images/ScatterAdd.png)
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | resource: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 dtype | updates: A tensor of updated values to add to |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> Tensor v'8 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
var: Should be from a Variable().
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> Tensor v'8 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
That is for rows we have grad for, we update var and accum as follows: accum += grad * grad var -= lr * grad * (1 / sqrt(accum))
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | gradient_accumulator: Should be from a Variable(). |
-> ResourceHandle | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 Int64 | global_step: Training step number. Must be a scalar. |
-> m' ControlNode |
Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | gradient_accumulator: Should be from a Variable(). |
-> ResourceHandle | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 Int64 | global_step: Training step number. Must be a scalar. |
-> m' ControlNode |
resourceSparseApplyCenteredRMSProp
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | mg: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> Tensor v'10 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' ControlNode |
Update '*var' according to the centered RMSProp algorithm.
The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.
Note that in dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2 mean_grad = decay * mean_grad + (1-decay) * gradient Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) var <- var - mom
resourceSparseApplyCenteredRMSProp'
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | mg: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> Tensor v'10 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 t | lr_power: Scaling factor. Must be a scalar. |
-> m' ControlNode |
Update relevant entries in '*var' according to the Ftrl-proximal scheme.
That is for rows we have grad for, we update var, accum and linear as follows: accum_new = accum + grad * grad linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> ResourceHandle | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 t | lr_power: Scaling factor. Must be a scalar. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | momentum: Momentum. Must be a scalar. |
-> m' ControlNode |
Update relevant entries in '*var' and '*accum' according to the momentum scheme.
Set use_nesterov = True if you want to use Nesterov momentum.
That is for rows we have grad for, we update var and accum as follows:
accum = accum * momentum + grad var -= lr * accum
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | momentum: Momentum. Must be a scalar. |
-> m' ControlNode |
resourceSparseApplyProximalAdagrad
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> Tensor v'7 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
That is for rows we have grad for, we update var and accum as follows: accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
resourceSparseApplyProximalAdagrad'
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> Tensor v'7 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
resourceSparseApplyProximalGradientDescent
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | grad: The gradient. |
-> Tensor v'6 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
Sparse update '*var' as FOBOS algorithm with fixed learning rate.
That is for rows we have grad for, we update var as follows: prox_v = var - alpha * grad var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
resourceSparseApplyProximalGradientDescent'
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | grad: The gradient. |
-> Tensor v'6 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> Tensor v'9 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' ControlNode |
Update '*var' according to the RMSProp algorithm.
Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> ResourceHandle | var: Should be from a Variable(). |
-> ResourceHandle | ms: Should be from a Variable(). |
-> ResourceHandle | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> Tensor v'9 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' ControlNode |
:: TensorType dt | |
=> Tensor v'1 ByteString | file_pattern: Must have a single element. The pattern of the files from which we read the tensor. |
-> Tensor v'2 ByteString | tensor_name: Must have a single element. The name of the tensor to be restored. |
-> Tensor Build dt | tensor: The restored tensor. |
Restores a tensor from checkpoint files.
Reads a tensor stored in one or several files. If there are several files (for
instance because a tensor was saved as slices), file_pattern
may contain
wildcard symbols (*
and ?
) in the filename portion only, not in the
directory portion.
If a file_pattern
matches several files, preferred_shard
can be used to hint
in which file the requested tensor is likely to be found. This op will first
open the file at index preferred_shard
in the list of matching files and try
to restore tensors from that file. Only if some tensors or tensor slices are
not found in that first file, then the Op opens all the files. Setting
preferred_shard
to match the value passed as the shard
input
of a matching Save
Op may speed up Restore. This attribute only affects
performance, not correctness. The default value -1 means files are processed in
order.
See also RestoreSlice
.
:: TensorType dt | |
=> OpParams | |
-> Tensor v'1 ByteString | file_pattern: Must have a single element. The pattern of the files from which we read the tensor. |
-> Tensor v'2 ByteString | tensor_name: Must have a single element. The name of the tensor to be restored. |
-> Tensor Build dt | tensor: The restored tensor. |
:: TensorType dt | |
=> Tensor v'1 ByteString | file_pattern: Must have a single element. The pattern of the files from which we read the tensor. |
-> Tensor v'2 ByteString | tensor_name: Must have a single element. The name of the tensor to be restored. |
-> Tensor v'3 ByteString | shape_and_slice: Scalar. The shapes and slice specifications to use when restoring a tensor. |
-> Tensor Build dt | tensor: The restored tensor. |
Restores a tensor from checkpoint files.
This is like Restore
except that restored tensor can be listed as filling
only a slice of a larger tensor. shape_and_slice
specifies the shape of the
larger tensor and the slice that the restored tensor covers.
The shape_and_slice
input has the same format as the
elements of the shapes_and_slices
input of the SaveSlices
op.
:: TensorType dt | |
=> OpParams | |
-> Tensor v'1 ByteString | file_pattern: Must have a single element. The pattern of the files from which we read the tensor. |
-> Tensor v'2 ByteString | tensor_name: Must have a single element. The name of the tensor to be restored. |
-> Tensor v'3 ByteString | shape_and_slice: Scalar. The shapes and slice specifications to use when restoring a tensor. |
-> Tensor Build dt | tensor: The restored tensor. |
:: TensorTypes dtypes | |
=> Tensor v'1 ByteString | prefix: Must have a single element. The prefix of a V2 checkpoint. |
-> Tensor v'2 ByteString | tensor_names: shape {N}. The names of the tensors to be restored. |
-> Tensor v'3 ByteString | shape_and_slices: shape {N}. The slice specs of the tensors to be restored. Empty strings indicate that they are non-partitioned tensors. |
-> TensorList Build dtypes | tensors: shape {N}. The restored tensors, whose shapes are read from the checkpoint directly. |
Restores tensors from a V2 checkpoint.
For backward compatibility with the V1 format, this Op currently allows restoring from a V1 checkpoint as well: - This Op first attempts to find the V2 index file pointed to by "prefix", and if found proceed to read it as a V2 checkpoint; - Otherwise the V1 read path is invoked. Relying on this behavior is not recommended, as the ability to fall back to read V1 might be deprecated and eventually removed.
By default, restores the named tensors in full. If the caller wishes to restore specific slices of stored tensors, "shape_and_slices" should be non-empty strings and correspondingly well-formed.
Callers must ensure all the named tensors are indeed stored in the checkpoint.
:: TensorTypes dtypes | |
=> OpParams | |
-> Tensor v'1 ByteString | prefix: Must have a single element. The prefix of a V2 checkpoint. |
-> Tensor v'2 ByteString | tensor_names: shape {N}. The names of the tensors to be restored. |
-> Tensor v'3 ByteString | shape_and_slices: shape {N}. The slice specs of the tensors to be restored. Empty strings indicate that they are non-partitioned tensors. |
-> TensorList Build dtypes | tensors: shape {N}. The restored tensors, whose shapes are read from the checkpoint directly. |
:: OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | tensor: Up to 8-D. |
-> Tensor v'2 Bool | dims: 1-D. The dimensions to reverse. |
-> Tensor Build t | output: The same shape as |
Reverses specific dimensions of a tensor.
Given a tensor
, and a bool
tensor dims
representing the dimensions
of tensor
, this operation reverses each dimension i of tensor
where
`dims[i]` is True
.
tensor
can have up to 8 dimensions. The number of dimensions
of tensor
must equal the number of elements in dims
. In other words:
`rank(tensor) = size(dims)`
For example:
```prettyprint
# tensor t
is [[[[ 0, 1, 2, 3],
# [ 4, 5, 6, 7],
# [ 8, 9, 10, 11]],
# [[12, 13, 14, 15],
# [16, 17, 18, 19],
# [20, 21, 22, 23]]]]
# tensor t
shape is [1, 2, 3, 4]
# dims
is [False, False, False, True]
reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
[ 7, 6, 5, 4],
[ 11, 10, 9, 8]],
[[15, 14, 13, 12],
[19, 18, 17, 16],
[23, 22, 21, 20]]]]
# dims
is [False, True, False, False]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]]]
# dims
is [False, False, True, False]
reverse(t, dims) ==> [[[[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]],
[[20, 21, 22, 23],
[16, 17, 18, 19],
[12, 13, 14, 15]]]]
```
:: (TensorType t, OneOf `[Int32, Int64]` tlen) | |
=> Int64 | seq_dim: The dimension which is partially reversed. |
-> Tensor v'1 t | input: The input to reverse. |
-> Tensor v'2 tlen | seq_lengths: 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) < input.dims(seq_dim)` |
-> Tensor Build t | output: The partially reversed input. It has the same shape as |
Reverses variable length slices.
This op first slices input
along the dimension batch_dim
, and for each
slice i
, reverses the first `seq_lengths[i]` elements along
the dimension seq_dim
.
The elements of seq_lengths
must obey `seq_lengths[i] < input.dims[seq_dim]`,
and seq_lengths
must be a vector of length `input.dims[batch_dim]`.
The output slice i
along dimension batch_dim
is then given by input
slice i
, with the first `seq_lengths[i]` slices along dimension
seq_dim
reversed.
For example:
```prettyprint # Given this: batch_dim = 0 seq_dim = 1 input.dims = (4, 8, ...) seq_lengths = [7, 2, 3, 5]
# then slices of input are reversed on seq_dim, but only up to seq_lengths: output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
# while entries past seq_lens are copied through: output[0, 7:, :, ...] = input[0, 7:, :, ...] output[1, 2:, :, ...] = input[1, 2:, :, ...] output[2, 3:, :, ...] = input[2, 3:, :, ...] output[3, 5:, :, ...] = input[3, 5:, :, ...] ```
In contrast, if:
```prettyprint # Given this: batch_dim = 2 seq_dim = 0 input.dims = (8, ?, 4, ...) seq_lengths = [7, 2, 3, 5]
# then slices of input are reversed on seq_dim, but only up to seq_lengths: output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
# while entries past seq_lens are copied through: output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...] ```
:: (TensorType t, OneOf `[Int32, Int64]` tlen) | |
=> OpParams | |
-> Int64 | seq_dim: The dimension which is partially reversed. |
-> Tensor v'1 t | input: The input to reverse. |
-> Tensor v'2 tlen | seq_lengths: 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) < input.dims(seq_dim)` |
-> Tensor Build t | output: The partially reversed input. It has the same shape as |
:: (OneOf `[Int32, Int64]` tidx, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) | |
=> Tensor v'1 t | tensor: Up to 8-D. |
-> Tensor v'2 tidx | axis: 1-D. The indices of the dimensions to reverse. |
-> Tensor Build t | output: The same shape as |
Reverses specific dimensions of a tensor.
NOTE `tf.reverse` has now changed behavior in preparation for 1.0. `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
Given a tensor
, and an int32
tensor axis
representing the set of
dimensions of tensor
to reverse. This operation reverses each dimension
i
for which there exists j
s.t. `axis[j] == i`.
tensor
can have up to 8 dimensions. The number of dimensions specified
in axis
may be 0 or more entries. If an index is specified more than
once, an InvalidArgument error is raised.
For example:
```prettyprint
# tensor t
is [[[[ 0, 1, 2, 3],
# [ 4, 5, 6, 7],
# [ 8, 9, 10, 11]],
# [[12, 13, 14, 15],
# [16, 17, 18, 19],
# [20, 21, 22, 23]]]]
# tensor t
shape is [1, 2, 3, 4]
# dims
is [3] or dims
is -1
reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
[ 7, 6, 5, 4],
[ 11, 10, 9, 8]],
[[15, 14, 13, 12],
[19, 18, 17, 16],
[23, 22, 21, 20]]]]
# dims
is '[1]' (or dims
is '[-3]')
reverse(t, dims) ==> [[[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]]]
# dims
is '[2]' (or dims
is '[-2]')
reverse(t, dims) ==> [[[[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]],
[[20, 21, 22, 23],
[16, 17, 18, 19],
[12, 13, 14, 15]]]]
```
Returns element-wise integer closest to x.
If the result is midway between two representable values, the even representable is chosen. For example:
``` rint(-1.5) ==> -2.0 rint(0.5000001) ==> 1.0 rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] ```
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Rounds the values of a tensor to the nearest integer, element-wise.
Rounds half to even. Also known as banker's rounding. If you want to round according to the current system rounding mode, use std::rint.
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes reciprocal of square root of x element-wise.
I.e., \(y = 1 / \sqrt{x}\).
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Computes the gradient for the rsqrt of x
wrt its input.
Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and dy
is the corresponding input gradient.
:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) | |
=> Tensor v'1 t | image_size: 1-D, containing `[height, width, channels]`. |
-> Tensor v'2 Float | bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. |
-> m' (Tensor Value t, Tensor Value t, Tensor Value Float) | (begin, size, bboxes)
|
Generate a single randomly distorted bounding box for an image.
Bounding box annotations are often supplied in addition to ground-truth labels
in image recognition or object localization tasks. A common technique for
training such a system is to randomly distort an image while preserving
its content, i.e. *data augmentation*. This Op outputs a randomly distorted
localization of an object, i.e. bounding box, given an image_size
,
bounding_boxes
and a series of constraints.
The output of this Op is a single bounding box that may be used to crop the
original image. The output is returned as 3 tensors: begin
, size
and
bboxes
. The first 2 tensors can be fed directly into `tf.slice` to crop the
image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
what the bounding box looks like.
Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.
For example,
```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes)
# Draw the bounding box in an image summary.
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox_for_draw)
tf.image_summary('images_with_box', image_with_box)
# Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ```
Note that if no bounding box information is available, setting
`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
bounding box covering the whole image. If use_image_if_no_bounding_boxes
is
false and no bounding boxes are supplied, an error is raised.
:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) | |
=> OpParams | |
-> Tensor v'1 t | image_size: 1-D, containing `[height, width, channels]`. |
-> Tensor v'2 Float | bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. |
-> m' (Tensor Value t, Tensor Value t, Tensor Value Float) | (begin, size, bboxes)
|
:: (MonadBuild m', TensorTypes t) | |
=> Tensor v'1 ByteString | filename: Must have a single element. The name of the file to which we write the tensor. |
-> Tensor v'2 ByteString | tensor_names: Shape `[N]`. The names of the tensors to be saved. |
-> TensorList v'3 t | data: |
-> m' ControlNode |
Saves the input tensors to disk.
The size of tensor_names
must match the number of tensors in `data`. `data[i]`
is written to filename
with name `tensor_names[i]`.
See also SaveSlices
.
:: (MonadBuild m', TensorTypes t) | |
=> OpParams | |
-> Tensor v'1 ByteString | filename: Must have a single element. The name of the file to which we write the tensor. |
-> Tensor v'2 ByteString | tensor_names: Shape `[N]`. The names of the tensors to be saved. |
-> TensorList v'3 t | data: |
-> m' ControlNode |
:: (MonadBuild m', TensorTypes t) | |
=> Tensor v'1 ByteString | filename: Must have a single element. The name of the file to which we write the tensor. |
-> Tensor v'2 ByteString | tensor_names: Shape `[N]`. The names of the tensors to be saved. |
-> Tensor v'3 ByteString | shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when saving the tensors. |
-> TensorList v'4 t | data: |
-> m' ControlNode |
Saves input tensors slices to disk.
This is like Save
except that tensors can be listed in the saved file as being
a slice of a larger tensor. shapes_and_slices
specifies the shape of the
larger tensor and the slice that this tensor covers. shapes_and_slices
must
have as many elements as tensor_names
.
Elements of the shapes_and_slices
input must either be:
- The empty string, in which case the corresponding tensor is saved normally.
- A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
dimI
are the dimensions of the larger tensor and `slice-spec` specifies what part is covered by the tensor to save.
`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
where each sliceI
is either:
- The string `-`, meaning that the slice covers all indices of this dimension.
- `start,length`, where `start` and `length` are integers. In that case the slice covers `length` indices starting at `start`.
See also Save
.
:: (MonadBuild m', TensorTypes t) | |
=> OpParams | |
-> Tensor v'1 ByteString | filename: Must have a single element. The name of the file to which we write the tensor. |
-> Tensor v'2 ByteString | tensor_names: Shape `[N]`. The names of the tensors to be saved. |
-> Tensor v'3 ByteString | shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when saving the tensors. |
-> TensorList v'4 t | data: |
-> m' ControlNode |
:: (MonadBuild m', TensorTypes dtypes) | |
=> Tensor v'1 ByteString | prefix: Must have a single element. The prefix of the V2 checkpoint to which we write the tensors. |
-> Tensor v'2 ByteString | tensor_names: shape {N}. The names of the tensors to be saved. |
-> Tensor v'3 ByteString | shape_and_slices: shape {N}. The slice specs of the tensors to be saved. Empty strings indicate that they are non-partitioned tensors. |
-> TensorList v'4 dtypes | tensors: |
-> m' ControlNode |
Saves tensors in V2 checkpoint format.
By default, saves the named tensors in full. If the caller wishes to save specific slices of full tensors, "shape_and_slices" should be non-empty strings and correspondingly well-formed.
:: (MonadBuild m', TensorTypes dtypes) | |
=> OpParams | |
-> Tensor v'1 ByteString | prefix: Must have a single element. The prefix of the V2 checkpoint to which we write the tensors. |
-> Tensor v'2 ByteString | tensor_names: shape {N}. The names of the tensors to be saved. |
-> Tensor v'3 ByteString | shape_and_slices: shape {N}. The slice specs of the tensors to be saved. Empty strings indicate that they are non-partitioned tensors. |
-> TensorList v'4 dtypes | tensors: |
-> m' ControlNode |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 ByteString | tags: Tags for the summary. |
-> Tensor v'2 t | values: Same shape as `tags`. Values for the summary. |
-> Tensor Build ByteString | summary: Scalar. Serialized |
Outputs a Summary
protocol buffer with scalar values.
The input tags
and values
must have the same shape. The generated summary
has a summary value for each tag-value pair in tags
and values
.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to add to |
-> m' (Tensor Ref t) | output_ref: = Same as |
Adds sparse updates to a variable reference.
This operation computes
# Scalar indices ref[indices, ...] += updates[...]
# Vector indices (for each i) ref[indices[i], ...] += updates[i, ...]
# High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
This operation outputs ref
after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple indices
reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/ScatterAdd.png" alt="ScatterAdd operation diagram"/></div>
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to add to |
-> m' (Tensor Ref t) | output_ref: = Same as |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of values that |
-> m' (Tensor Ref t) | output_ref: = Same as |
Divides a variable reference by sparse updates.
This operation computes
# Scalar indices ref[indices, ...] /= updates[...]
# Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...]
# High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
This operation outputs ref
after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple indices
reference
the same location, their contributions divide.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of values that |
-> m' (Tensor Ref t) | output_ref: = Same as |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to multiply to |
-> m' (Tensor Ref t) | output_ref: = Same as |
Multiplies sparse updates into a variable reference.
This operation computes
# Scalar indices ref[indices, ...] *= updates[...]
# Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...]
# High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
This operation outputs ref
after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple indices
reference
the same location, their contributions multiply.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to multiply to |
-> m' (Tensor Ref t) | output_ref: = Same as |
:: (TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'2 t | updates: A Tensor. Must have the same type as tensor. A tensor of updated values to store in ref. |
-> Tensor v'3 tindices | shape: A vector. The shape of the resulting tensor. |
-> Tensor Build t | output: A new tensor with the given shape and updates applied according to the indices. |
Creates a new tensor by applying sparse updates
to individual
values or slices within a zero tensor of the given shape
tensor according to
indices. This operator is the inverse of the tf.gather_nd
operator which extracts values or slices from a given tensor.
TODO(simister): Add a link to Variable.getitem documentation on slice syntax.
shape
is a TensorShape
with rank P
and indices
is a Tensor
of rank
Q
.
indices
must be integer tensor, containing indices into shape
.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of indices
(with length K
) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the K
th
dimension of shape
.
updates
is Tensor of rank `Q-1+P-K` with shape:
``` [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]]. ```
The simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/ScatterNd1.png" alt="ScatterNd element-insertion diagram"/></div>
In Python, this scatter operation would look like this:
indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) shape = tf.constant([8]) scatter = tf.scatter_nd(indices, updates, shape) with tf.Session() as sess: print sess.run(scatter)
The resulting tensor would look like this:
[0, 11, 0, 10, 9, 0, 0, 12]
We can also, insert entire slices of a higher rank tensor all at once. For example, if we wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new values.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/ScatterNd2.png" alt="ScatterNd slice-insertion diagram"/></div>
In Python, this scatter operation would look like this:
indices = tf.constant([[0], [2]]) updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]) shape = tf.constant([4, 4, 4]) scatter = tf.scatter_nd(indices, updates, shape) with tf.Session() as sess: print sess.run(scatter)
The resulting tensor would look like this:
[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
 [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
:: (TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'2 t | updates: A Tensor. Must have the same type as tensor. A tensor of updated values to store in ref. |
-> Tensor v'3 tindices | shape: A vector. The shape of the resulting tensor. |
-> Tensor Build t | output: A new tensor with the given shape and updates applied according to the indices. |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: A mutable Tensor. Should be from a Variable node. |
-> Tensor v'2 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'3 t | updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref. |
-> m' (Tensor Ref t) | output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done. |
Applies sparse addition between updates
and individual values or slices
within a given variable according to indices
.
ref
is a Tensor
with rank P
and indices
is a Tensor
of rank Q
.
indices
must be integer tensor, containing indices into ref
.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of indices
(with length K
) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the K
th
dimension of ref
.
updates
is Tensor
of rank `Q-1+P-K` with shape:
``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```
For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that addition would look like this:
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) add = tf.scatter_nd_add(ref, indices, updates) with tf.Session() as sess: print sess.run(add)
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See tf.scatter_nd for more details about how to make updates to slices.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: A mutable Tensor. Should be from a Variable node. |
-> Tensor v'2 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'3 t | updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref. |
-> m' (Tensor Ref t) | output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done. |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: A mutable Tensor. Should be from a Variable node. |
-> Tensor v'2 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'3 t | updates: A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref. |
-> m' (Tensor Ref t) | output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done. |
Applies sparse subtraction between updates
and individual values or slices
within a given variable according to indices
.
ref
is a Tensor
with rank P
and indices
is a Tensor
of rank Q
.
indices
must be integer tensor, containing indices into ref
.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of indices
(with length K
) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the K
th
dimension of ref
.
updates
is Tensor
of rank `Q-1+P-K` with shape:
``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```
For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) sub = tf.scatter_nd_sub(ref, indices, updates) with tf.Session() as sess: print sess.run(sub)
The resulting update to ref would look like this:
[1, -9, 3, -6, -4, 6, 7, -4]
See tf.scatter_nd for more details about how to make updates to slices.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: A mutable Tensor. Should be from a Variable node. |
-> Tensor v'2 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'3 t | updates: A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref. |
-> m' (Tensor Ref t) | output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done. |
:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: A mutable Tensor. Should be from a Variable node. |
-> Tensor v'2 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'3 t | updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref. |
-> m' (Tensor Ref t) | output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done. |
Applies sparse updates
to individual values or slices within a given
variable according to indices
.
ref
is a Tensor
with rank P
and indices
is a Tensor
of rank Q
.
indices
must be integer tensor, containing indices into ref
.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of indices
(with length K
) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the K
th
dimension of ref
.
updates
is Tensor
of rank `Q-1+P-K` with shape:
``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```
For example, say we want to update 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this:
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) update = tf.scatter_nd_update(ref, indices, updates) with tf.Session() as sess: print sess.run(update)
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See tf.scatter_nd for more details about how to make updates to slices.
:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: A mutable Tensor. Should be from a Variable node. |
-> Tensor v'2 tindices | indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref. |
-> Tensor v'3 t | updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref. |
-> m' (Tensor Ref t) | output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done. |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to subtract from |
-> m' (Tensor Ref t) | output_ref: = Same as |
Subtracts sparse updates to a variable reference.
# Scalar indices ref[indices, ...] -= updates[...]
# Vector indices (for each i) ref[indices[i], ...] -= updates[i, ...]
# High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
This operation outputs ref
after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple indices
reference
the same location, their (negated) contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/ScatterSub.png" alt="ScatterSub operation diagram"/></div>
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to subtract from |
-> m' (Tensor Ref t) | output_ref: = Same as |
:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to store in |
-> m' (Tensor Ref t) | output_ref: = Same as |
Applies sparse updates to a variable reference.
This operation computes
# Scalar indices ref[indices, ...] = updates[...]
# Vector indices (for each i) ref[indices[i], ...] = updates[i, ...]
# High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
This operation outputs ref
after the update is done.
This makes it easier to chain operations that need to use the reset value.
If values in ref
is to be updated more than once, because there are
duplicate entries in indices
, the order at which the updates happen
for each value is undefined.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/ScatterUpdate.png" alt="ScatterUpdate operation diagram"/></div>
:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | ref: Should be from a |
-> Tensor v'2 tindices | indices: A tensor of indices into the first dimension of |
-> Tensor v'3 t | updates: A tensor of updated values to store in |
-> m' (Tensor Ref t) | output_ref: = Same as |
:: Tensor v'1 ByteString | input: vector of strings to compute fingerprints on. |
-> Tensor Build Int64 | output: a (N,2) shaped matrix where N is the number of elements in the input vector. Each row contains the low and high parts of the fingerprint. |
Computes fingerprints of the input strings.
:: Float | l1: Symmetric l1 regularization strength. |
-> Float | l2: Symmetric l2 regularization strength. |
-> Int64 | num_inner_iterations: Number of iterations per mini-batch. |
-> Int64 | num_loss_partitions: Number of partitions of the global loss function. |
-> [Tensor v'1 Int64] | sparse_example_indices: a list of vectors which contain example indices. |
-> [Tensor v'2 Int64] | sparse_feature_indices: a list of vectors which contain feature indices. |
-> [Tensor v'3 Float] | sparse_feature_values: a list of vectors which contains feature value associated with each feature group. |
-> [Tensor v'4 Float] | dense_features: a list of matrices which contains the dense feature values. |
-> Tensor v'5 Float | example_weights: a vector which contains the weight associated with each example. |
-> Tensor v'6 Float | example_labels: a vector which contains the label/target associated with each example. |
-> [Tensor v'7 Int64] | sparse_indices: a list of vectors where each value is the indices which has corresponding weights in sparse_weights. This field may be omitted for the dense approach. |
-> [Tensor v'8 Float] | sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group. |
-> [Tensor v'9 Float] | dense_weights: a list of vectors where the values are the weights associated with a dense feature group. |
-> Tensor v'10 Float | example_state_data: a list of vectors containing the example state data. |
-> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) | (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)
|
Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
linear models with L1 + L2 regularization. As the global optimization objective is strongly-convex, the optimizer optimizes the dual objective at each step. The optimizer applies each update one example at a time. Examples are sampled uniformly, and the optimizer is learning-rate free and enjoys a linear convergence rate.
Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; Zhang, Tong. 2012 arXiv1211.2717S: http://arxiv.org/pdf/1211.2717v1.pdf
Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|
Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, Martin Takac http://arxiv.org/abs/1502.03508
Stochastic Dual Coordinate Ascent with Adaptive Probabilities Dominik Csiba, Zheng Qu, Peter Richtarik https://arxiv.org/abs/1502.08053
:: OpParams | |
-> Float | l1: Symmetric l1 regularization strength. |
-> Float | l2: Symmetric l2 regularization strength. |
-> Int64 | num_inner_iterations: Number of iterations per mini-batch. |
-> Int64 | num_loss_partitions: Number of partitions of the global loss function. |
-> [Tensor v'1 Int64] | sparse_example_indices: a list of vectors which contain example indices. |
-> [Tensor v'2 Int64] | sparse_feature_indices: a list of vectors which contain feature indices. |
-> [Tensor v'3 Float] | sparse_feature_values: a list of vectors which contains feature value associated with each feature group. |
-> [Tensor v'4 Float] | dense_features: a list of matrices which contains the dense feature values. |
-> Tensor v'5 Float | example_weights: a vector which contains the weight associated with each example. |
-> Tensor v'6 Float | example_labels: a vector which contains the label/target associated with each example. |
-> [Tensor v'7 Int64] | sparse_indices: a list of vectors where each value is the indices which has corresponding weights in sparse_weights. This field may be omitted for the dense approach. |
-> [Tensor v'8 Float] | sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group. |
-> [Tensor v'9 Float] | dense_weights: a list of vectors where the values are the weights associated with a dense feature group. |
-> Tensor v'10 Float | example_state_data: a list of vectors containing the example state data. |
-> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) | (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)
|
:: MonadBuild m' | |
=> Float | l1: Symmetric l1 regularization strength. |
-> Float | l2: Symmetric l2 regularization strength. Should be a positive float. |
-> [Tensor Ref Float] | weights: a list of vectors where each value is the weight associated with a feature group. |
-> m' ControlNode |
Applies L1 regularization shrink step on the parameters.
:: MonadBuild m' | |
=> OpParams | |
-> Float | l1: Symmetric l1 regularization strength. |
-> Float | l2: Symmetric l2 regularization strength. Should be a positive float. |
-> [Tensor Ref Float] | weights: a list of vectors where each value is the weight associated with a feature group. |
-> m' ControlNode |
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the maximum along segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Computes a tensor such that
\(output_i = \max_j(data_j)\) where max
is over j
such
that `segment_ids[j] == i`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/SegmentMax.png" alt="SegmentMax"></div>
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the mean along segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Computes a tensor such that
\(output_i = \frac{\sum_j data_j}{N}\) where mean
is
over j
such that `segment_ids[j] == i` and N
is the total number of
values summed.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/SegmentMean.png" alt="SegmentMean"></div>
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the minimum along segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Computes a tensor such that
\(output_i = \min_j(data_j)\) where min
is over j
such
that `segment_ids[j] == i`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/SegmentMin.png" alt="SegmentMin"></div>
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the product along segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Computes a tensor such that
\(output_i = \prod_j data_j\) where the product is over j
such
that `segment_ids[j] == i`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/SegmentProd.png" alt="SegmentProd"></div>
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the sum along segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Computes a tensor such that
\(output_i = \sum_j data_j\) where sum is over j
such
that `segment_ids[j] == i`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"><img style="width:100%" src="../../images/SegmentSum.png" alt="SegmentSum"></div>
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: TensorType t | |
=> Tensor v'1 Bool | condition |
-> Tensor v'2 t | t: = A |
-> Tensor v'3 t | e: = A |
-> Tensor Build t | output: = A |
Selects elements from t
or e
, depending on condition
.
The t
, and e
tensors must all have the same shape, and the
output will also have that shape.
The condition
tensor must be a scalar if t
and e
are scalars.
If t
and e
are vectors or higher rank, then condition
must be either a
scalar, a vector with size matching the first dimension of t
, or must have
the same shape as t
.
The condition
tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be
taken from t
(if true) or e
(if false).
If condition
is a vector and t
and e
are higher rank matrices, then
it chooses which row (outer dimension) to copy from t
and e
.
If condition
has the same shape as t
and e
, then it chooses which
element to copy from t
and e
.
For example:
```prettyprint
# condition
tensor is [[True, False]
# [False, True]]
# t
is [[1, 2],
# [3, 4]]
# e
is [[5, 6],
# [7, 8]]
select(condition, t, e) ==> [[1, 6],
[7, 4]]
# condition
tensor is [True, False]
# t
is [[1, 2],
# [3, 4]]
# e
is [[5, 6],
# [7, 8]]
select(condition, t, e) ==> [[1, 2],
[7, 8]]
```
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 Bool | condition |
-> Tensor v'2 t | t: = A |
-> Tensor v'3 t | e: = A |
-> Tensor Build t | output: = A |
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | input: Shape is `[..., M, M]`. |
-> Tensor Build t | output: Shape is `[..., M+1, M]`. |
Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix SelfAdjointEig.
The result is a [..., M+1, M] matrix with [..., 0,:] containing the eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 t | input: |
-> (Tensor Build t, Tensor Build t) | (e, v)
|
Computes the eigen decomposition of one or more square self-adjoint matrices.
Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
input
such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
```prettyprint # a is a tensor. # e is a tensor of eigenvalues. # v is a tensor of eigenvectors. e, v = self_adjoint_eig(a) e = self_adjoint_eig(a, compute_v=False) ```
:: TensorType t | |
=> Tensor v'1 Int64 | sparse_indices: 2-D. The |
-> Tensor v'2 t | sparse_values: 1-D. The |
-> Tensor v'3 Int64 | sparse_shape: 1-D. The |
-> Tensor Build ByteString | serialized_sparse |
Serialize an N
-minibatch SparseTensor
into an `[N, 3]` string Tensor
.
The SparseTensor
must have rank R
greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the SparseTensor
must be sorted in increasing order of this first dimension. The serialized
SparseTensor
objects going into each row of serialized_sparse
will have
rank `R-1`.
The minibatch size N
is extracted from `sparse_shape[0]`.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 Int64 | sparse_indices: 2-D. The |
-> Tensor v'2 t | sparse_values: 1-D. The |
-> Tensor v'3 Int64 | sparse_shape: 1-D. The |
-> Tensor Build ByteString | serialized_sparse |
:: TensorType t | |
=> Tensor v'1 Int64 | sparse_indices: 2-D. The |
-> Tensor v'2 t | sparse_values: 1-D. The |
-> Tensor v'3 Int64 | sparse_shape: 1-D. The |
-> Tensor Build ByteString | serialized_sparse |
Serialize a SparseTensor
into a string 3-vector (1-D Tensor
) object.
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> Tensor v'1 Int64 | set_indices: 2D |
-> Tensor v'2 t | set_values: 1D |
-> Tensor v'3 Int64 | set_shape: 1D |
-> Tensor Build Int32 | size: For |
Number of unique elements along last dimension of input set
.
Input set
is a SparseTensor
represented by set_indices
, set_values
,
and set_shape
. The last dimension contains values in a set, duplicates are
allowed but ignored.
If validate_indices
is True
, this op validates the order and range of set
indices.
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | set_indices: 2D |
-> Tensor v'2 t | set_values: 1D |
-> Tensor v'3 Int64 | set_shape: 1D |
-> Tensor Build Int32 | size: For |
:: (TensorType t, OneOf `[Int32, Int64]` out_type) | |
=> Tensor v'1 t | input |
-> Tensor Build out_type | output |
Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of input
.
For example:
```prettyprint
# t
is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
:: (TensorType t, OneOf `[Int32, Int64]` out_type) | |
=> [Tensor v'1 t] | input |
-> [Tensor Build out_type] | output |
Returns shape of tensors.
This operation returns N 1-D integer tensors representing shape of `input[i]s`.
:: Tensor v'1 ByteString | basename |
-> Tensor v'2 Int32 | shard |
-> Tensor v'3 Int32 | num_shards |
-> Tensor Build ByteString | filename |
Generate a sharded filename. The filename is printf formatted as
%s-%05d-of-%05d, basename, shard, num_shards.
:: OpParams | |
-> Tensor v'1 ByteString | basename |
-> Tensor v'2 Int32 | shard |
-> Tensor v'3 Int32 | num_shards |
-> Tensor Build ByteString | filename |
:: Tensor v'1 ByteString | basename |
-> Tensor v'2 Int32 | num_shards |
-> Tensor Build ByteString | filename |
Generate a glob pattern matching all sharded file names.
:: OpParams | |
-> Tensor v'1 ByteString | basename |
-> Tensor v'2 Int32 | num_shards |
-> Tensor Build ByteString | filename |
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes sigmoid of x
element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Computes the gradient of the sigmoid of x
wrt its input.
Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
dy
is the corresponding input gradient.
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes sin of x element-wise.
:: (TensorType t, OneOf `[Int32, Int64]` out_type) | |
=> Tensor v'1 t | input |
-> Tensor Build out_type | output |
Returns the size of a tensor.
This operation returns an integer representing the number of elements in
input
.
For example:
```prettyprint
# t
is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```
:: MonadBuild m' | |
=> Int64 | batch_size: The size of produced batch. |
-> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32) | (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)
|
Parses a text file and creates a batch of examples.
:: MonadBuild m' | |
=> OpParams | |
-> Int64 | batch_size: The size of produced batch. |
-> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32) | (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)
|
:: (TensorType t, OneOf `[Int32, Int64]` index) | |
=> Tensor v'1 t | input |
-> Tensor v'2 index | begin: begin[i] specifies the offset into the |
-> Tensor v'3 index | size: size[i] specifies the number of elements of the |
-> Tensor Build t | output |
Return a slice from input
.
The output tensor is a tensor with dimensions described by size
whose values are extracted from input
starting at the offsets in
begin
.
*Requirements*: `0 <= begin[i] <= begin[i] + size[i] <= Di` for `i` in `[0, n)`
:: (TensorType t, OneOf `[Int32, Int64]` index) | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor v'2 index | begin: begin[i] specifies the offset into the |
-> Tensor v'3 index | size: size[i] specifies the number of elements of the |
-> Tensor Build t | output |
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | logits: 2-D with shape `[batch_size, num_classes]`. |
-> Tensor Build t | softmax: Same shape as |
Computes softmax activations.
For each batch i
and class j
we have
softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
:: OneOf `[Word16, Double, Float]` t | |
=> Tensor v'1 t | features: batch_size x num_classes matrix |
-> Tensor v'2 t | labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid probability distribution. |
-> (Tensor Build t, Tensor Build t) | (loss, backprop)
|
Computes softmax cross entropy cost and gradients to backpropagate.
Inputs are the logits, not probabilities.
softmaxCrossEntropyWithLogits'
:: OneOf `[Word16, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | features: batch_size x num_classes matrix |
-> Tensor v'2 t | labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid probability distribution. |
-> (Tensor Build t, Tensor Build t) | (loss, backprop)
|
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | features |
-> Tensor Build t | activations |
Computes softplus: `log(exp(features) + 1)`.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding softplus operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding softplus operation. |
-> Tensor Build t | backprops: The gradients: `gradients / (1 + exp(-features))`. |
Computes softplus gradients for a softplus operation.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding softplus operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding softplus operation. |
-> Tensor Build t | backprops: The gradients: `gradients / (1 + exp(-features))`. |
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | features |
-> Tensor Build t | activations |
Computes softsign: `features / (abs(features) + 1)`.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding softsign operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding softsign operation. |
-> Tensor Build t | backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`. |
Computes softsign gradients for a softsign operation.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | gradients: The backpropagated gradients to the corresponding softsign operation. |
-> Tensor v'2 t | features: The features passed as input to the corresponding softsign operation. |
-> Tensor Build t | backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`. |
:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) | |
=> Int64 | block_size |
-> Tensor v'1 t | input: 4-D with shape `[batch, height, width, depth]`. |
-> Tensor v'2 tpaddings | paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows: paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] The effective spatial dimensions of the zero-padded input tensor will be: height_pad = pad_top + height + pad_bottom width_pad = pad_left + width + pad_right The attr
The shape of the output will be: [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth] Some examples:
```prettyprint x = [[[[1], [2]], [[3], [4]]]] ``` The output tensor has shape `[4, 1, 1, 1]` and value: ```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ``` The output tensor has shape `[4, 1, 1, 3]` and value: ```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[4, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[8, 1, 2, 1]` and value: ```prettyprint x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] ``` Among others, this operation is useful for reducing atrous convolution into regular convolution. |
-> Tensor Build t | output |
SpaceToBatch for 4-D tensors of type T.
This is a legacy version of the more general SpaceToBatchND.
Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
More specifically, this op outputs a copy of the input tensor where values from
the height
and width
dimensions are moved to the batch
dimension. After
the zero-padding, both height
and width
of the input must be divisible by the
block size.
:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) | |
=> OpParams | |
-> Int64 | block_size |
-> Tensor v'1 t | input: 4-D with shape `[batch, height, width, depth]`. |
-> Tensor v'2 tpaddings | paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows: paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] The effective spatial dimensions of the zero-padded input tensor will be: height_pad = pad_top + height + pad_bottom width_pad = pad_left + width + pad_right The attr
The shape of the output will be: [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth] Some examples:
```prettyprint x = [[[[1], [2]], [[3], [4]]]] ``` The output tensor has shape `[4, 1, 1, 1]` and value: ```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ``` The output tensor has shape `[4, 1, 1, 3]` and value: ```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[4, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[8, 1, 2, 1]` and value: ```prettyprint x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] ``` Among others, this operation is useful for reducing atrous convolution into regular convolution. |
-> Tensor Build t | output |
:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) | |
=> Tensor v'1 t | input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has |
-> Tensor v'2 tblock_shape | block_shape: 1-D with shape `[M]`, all values must be >= 1. |
-> Tensor v'3 tpaddings | paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
`paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
`i + 1`, which corresponds to spatial dimension This operation is equivalent to the following steps:
block_shape + [batch] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape
Some examples:
```prettyprint x = [[[[1], [2]], [[3], [4]]]] ``` The output tensor has shape `[4, 1, 1, 1]` and value: ```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ``` The output tensor has shape `[4, 1, 1, 3]` and value: ```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[4, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[8, 1, 3, 1]` and value: ```prettyprint x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] ``` Among others, this operation is useful for reducing atrous convolution into regular convolution. |
-> Tensor Build t | output |
SpaceToBatch for N-D tensors of type T.
This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
grid of blocks of shape block_shape
, and interleaves these blocks with the
"batch" dimension (0) such that in the output, the spatial dimensions
`[1, ..., M]` correspond to the position within the grid, and the batch
dimension combines both the position within a spatial block and the original
batch position. Prior to division into blocks, the spatial dimensions of the
input are optionally zero padded according to paddings
. See below for a
precise description.
:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) | |
=> OpParams | |
-> Tensor v'1 t | input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has |
-> Tensor v'2 tblock_shape | block_shape: 1-D with shape `[M]`, all values must be >= 1. |
-> Tensor v'3 tpaddings | paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
`paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
`i + 1`, which corresponds to spatial dimension This operation is equivalent to the following steps:
block_shape + [batch] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape
Some examples:
```prettyprint x = [[[[1], [2]], [[3], [4]]]] ``` The output tensor has shape `[4, 1, 1, 1]` and value: ```prettyprint [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ``` The output tensor has shape `[4, 1, 1, 3]` and value: ```prettyprint [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[4, 2, 2, 1]` and value: ```prettyprint x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ```
```prettyprint x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` The output tensor has shape `[8, 1, 3, 1]` and value: ```prettyprint x = [[[[0], [1], [3]]], [[[0], [9], [11]]], [[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]] ``` Among others, this operation is useful for reducing atrous convolution into regular convolution. |
-> Tensor Build t | output |
:: TensorType t | |
=> Int64 | block_size: The size of the spatial block. |
-> Tensor v'1 t | input |
-> Tensor Build t | output |
SpaceToDepth for tensors of type T.
Rearranges blocks of spatial data, into depth. More specifically,
this op outputs a copy of the input tensor where values from the height
and width
dimensions are moved to the depth
dimension.
The attr block_size
indicates the input block size and how the data is moved.
- Non-overlapping blocks of size `block_size x block_size` are rearranged into depth at each location.
- The depth of the output tensor is `input_depth * block_size * block_size`.
- The input tensor's height and width must be divisible by block_size.
That is, assuming the input is in the shape: `[batch, height, width, depth]`, the shape of the output will be: `[batch, height/block_size, width/block_size, depth*block_size*block_size]`
This operation requires that the input tensor be of rank 4, and that
block_size
be >=1 and a divisor of both the input height
and width
.
This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.
For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:
```prettyprint x = [[[[1], [2]], [[3], [4]]]] ```
This operation will output a tensor of shape `[1, 1, 1, 4]`:
```prettyprint [[[[1, 2, 3, 4]]]] ```
Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, the corresponding output will have a single element (i.e. width and height are both 1) and will have a depth of 4 channels (1 * block_size * block_size). The output element shape is `[1, 1, 4]`.
For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
```prettyprint x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] ```
This operation, for block_size of 2, will return the following tensor of shape `[1, 1, 1, 12]`
```prettyprint [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ```
Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
```prettyprint x = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]], [[9], [10], [13], [14]], [[11], [12], [15], [16]]]] ```
the operator will return the following tensor of shape `[1 2 2 4]`:
```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]] ```
sparseAccumulatorApplyGradient
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> Bool | has_known_shape: Boolean indicating whether gradient_shape is unknown, in which case the input is ignored during validation. |
-> Tensor Ref ByteString | handle: The handle to a accumulator. |
-> Tensor v'2 Int64 | local_step: The local_step value at which the sparse gradient was computed. |
-> Tensor v'3 Int64 | gradient_indices: Indices of the sparse gradient to be accumulated. Must be a vector. |
-> Tensor v'4 dtype | gradient_values: Values are the non-zero slices of the gradient, and must have the same first dimension as indices, i.e., the nnz represented by indices and values must be consistent. |
-> Tensor v'5 Int64 | gradient_shape: Shape of the sparse gradient to be accumulated. |
-> m' ControlNode |
Applies a sparse gradient to a given accumulator. Does not add if local_step is
less than the accumulator's global_step.
sparseAccumulatorApplyGradient'
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> OpParams | |
-> Bool | has_known_shape: Boolean indicating whether gradient_shape is unknown, in which case the input is ignored during validation. |
-> Tensor Ref ByteString | handle: The handle to an accumulator. |
-> Tensor v'2 Int64 | local_step: The local_step value at which the sparse gradient was computed. |
-> Tensor v'3 Int64 | gradient_indices: Indices of the sparse gradient to be accumulated. Must be a vector. |
-> Tensor v'4 dtype | gradient_values: Values are the non-zero slices of the gradient, and must have the same first dimension as indices, i.e., the nnz represented by indices and values must be consistent. |
-> Tensor v'5 Int64 | gradient_shape: Shape of the sparse gradient to be accumulated. |
-> m' ControlNode |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> Tensor Ref ByteString | handle: The handle to a SparseConditionalAccumulator. |
-> Tensor v'2 Int32 | num_required: Number of gradients required before we return an aggregate. |
-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64) | (indices, values, shape)
|
Extracts the average sparse gradient in the given SparseConditionalAccumulator,
provided that sufficient (i.e., more than num_required) gradients have been accumulated. The op will block until sufficient gradients have been accumulated. If the accumulator has already aggregated more than num_required gradients, it will return its average of the accumulated gradients. Also automatically increments the recorded global_step in the accumulator by 1, and resets the aggregate to 0.
sparseAccumulatorTakeGradient'
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a SparseConditionalAccumulator. |
-> Tensor v'2 Int32 | num_required: Number of gradients required before we return an aggregate. |
-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64) | (indices, values, shape)
|
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) | |
=> Tensor v'1 Int64 | a_indices: 2-D. The |
-> Tensor v'2 t | a_values: 1-D. The |
-> Tensor v'3 Int64 | a_shape: 1-D. The |
-> Tensor v'4 Int64 | b_indices: 2-D. The |
-> Tensor v'5 t | b_values: 1-D. The |
-> Tensor v'6 Int64 | b_shape: 1-D. The |
-> Tensor v'7 treal | thresh: 0-D. The magnitude threshold that determines if an output value/index pair takes space. |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (sum_indices, sum_values, sum_shape)
|
Adds two `SparseTensor` objects to produce another `SparseTensor`.
The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, before this step run `SparseReorder` to restore index ordering.
By default, if two values sum to zero at some index, the output `SparseTensor` would still include that particular location in its index, storing a zero in the corresponding value slot. To override this, callers can specify `thresh`, indicating that if the sum has a magnitude strictly smaller than `thresh`, its corresponding value and index would then not be included. In particular, `thresh == 0` (default) means everything is kept and actual thresholding happens only for a positive value.
In the following shapes, `nnz` is the count after taking `thresh` into account.
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) | |
=> OpParams | |
-> Tensor v'1 Int64 | a_indices: 2-D. The |
-> Tensor v'2 t | a_values: 1-D. The |
-> Tensor v'3 Int64 | a_shape: 1-D. The |
-> Tensor v'4 Int64 | b_indices: 2-D. The |
-> Tensor v'5 t | b_values: 1-D. The |
-> Tensor v'6 Int64 | b_shape: 1-D. The |
-> Tensor v'7 treal | thresh: 0-D. The magnitude threshold that determines if an output value/index pair takes space. |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (sum_indices, sum_values, sum_shape)
|
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to the non-empty values of the sum. |
-> Tensor v'2 Int64 | a_indices: 2-D. The |
-> Tensor v'3 Int64 | b_indices: 2-D. The |
-> Tensor v'4 Int64 | sum_indices: 2-D. The |
-> (Tensor Build t, Tensor Build t) | (a_val_grad, b_val_grad)
|
The gradient operator for the SparseAdd op.
The SparseAdd op calculates A + B, where A, B, and the sum are all represented as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. non-empty values of the sum, and outputs the gradients w.r.t. the non-empty values of A and B.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to the non-empty values of the sum. |
-> Tensor v'2 Int64 | a_indices: 2-D. The |
-> Tensor v'3 Int64 | b_indices: 2-D. The |
-> Tensor v'4 Int64 | sum_indices: 2-D. The |
-> (Tensor Build t, Tensor Build t) | (a_val_grad, b_val_grad)
|
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> Tensor v'8 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
var: Should be from a Variable().
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | accum_update: Should be from a Variable(). |
-> Tensor v'4 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'5 t | rho: Decay factor. Must be a scalar. |
-> Tensor v'6 t | epsilon: Constant factor. Must be a scalar. |
-> Tensor v'7 t | grad: The gradient. |
-> Tensor v'8 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
That is for rows we have grad for, we update var and accum as follows:

    accum += grad * grad
    var -= lr * grad * (1 / sqrt(accum))
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | gradient_accumulator: Should be from a Variable(). |
-> Tensor Ref t | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 Int64 | global_step: Training step number. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | gradient_accumulator: Should be from a Variable(). |
-> Tensor Ref t | gradient_squared_accumulator: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 Int64 | global_step: Training step number. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | mg: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> Tensor v'10 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the centered RMSProp algorithm.
The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.
Note that in dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.
    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    mean_grad = decay * mean_grad + (1-decay) * gradient
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | mg: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'5 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'6 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'7 t | momentum |
-> Tensor v'8 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'9 t | grad: The gradient. |
-> Tensor v'10 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 t | lr_power: Scaling factor. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update relevant entries in '*var' according to the Ftrl-proximal scheme.
That is for rows we have grad for, we update var, accum and linear as follows:

    accum_new = accum + grad * grad
    linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    accum = accum_new
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor Ref t | linear: Should be from a Variable(). |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'7 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'8 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'9 t | lr_power: Scaling factor. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | momentum: Momentum. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update relevant entries in '*var' and '*accum' according to the momentum scheme.
Set use_nesterov = True if you want to use Nesterov momentum.
That is for rows we have grad for, we update var and accum as follows:
    accum = accum * momentum + grad
    var -= lr * accum
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | grad: The gradient. |
-> Tensor v'5 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> Tensor v'6 t | momentum: Momentum. Must be a scalar. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> Tensor v'7 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
That is for rows we have grad for, we update var and accum as follows:

    accum += grad * grad
    prox_v = var
    prox_v -= lr * grad * (1 / sqrt(accum))
    var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | accum: Should be from a Variable(). |
-> Tensor v'3 t | lr: Learning rate. Must be a scalar. |
-> Tensor v'4 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'5 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'6 t | grad: The gradient. |
-> Tensor v'7 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
sparseApplyProximalGradientDescent
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | grad: The gradient. |
-> Tensor v'6 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
Sparse update '*var' as FOBOS algorithm with fixed learning rate.
That is for rows we have grad for, we update var as follows:

    prox_v = var - alpha * grad
    var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
sparseApplyProximalGradientDescent'
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor v'2 t | alpha: Scaling factor. Must be a scalar. |
-> Tensor v'3 t | l1: L1 regularization. Must be a scalar. |
-> Tensor v'4 t | l2: L2 regularization. Must be a scalar. |
-> Tensor v'5 t | grad: The gradient. |
-> Tensor v'6 tindices | indices: A vector of indices into the first dimension of var and accum. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> Tensor v'9 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' (Tensor Ref t) | out: Same as "var". |
Update '*var' according to the RMSProp algorithm.
Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.
    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom
:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor Ref t | var: Should be from a Variable(). |
-> Tensor Ref t | ms: Should be from a Variable(). |
-> Tensor Ref t | mom: Should be from a Variable(). |
-> Tensor v'4 t | lr: Scaling factor. Must be a scalar. |
-> Tensor v'5 t | rho: Decay rate. Must be a scalar. |
-> Tensor v'6 t | momentum |
-> Tensor v'7 t | epsilon: Ridge term. Must be a scalar. |
-> Tensor v'8 t | grad: The gradient. |
-> Tensor v'9 tindices | indices: A vector of indices into the first dimension of var, ms and mom. |
-> m' (Tensor Ref t) | out: Same as "var". |
:: TensorType t | |
=> Int64 | concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input |
-> [Tensor v'1 Int64] | indices: 2-D. Indices of each input |
-> [Tensor v'2 t] | values: 1-D. Non-empty values of each |
-> [Tensor v'3 Int64] | shapes: 1-D. Shapes of each |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (output_indices, output_values, output_shape)
|
Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of these sparse tensors.
It is assumed that each input is a `SparseTensor` whose elements are ordered along increasing dimension number.
All inputs' shapes must match, except for the concat dimension. The `indices`, `values`, and `shapes` lists must have the same length.
The output shape is identical to the inputs', except along the concat dimension, where it is the sum of the inputs' sizes along that dimension.
The output elements will be resorted to preserve the sort order along increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty values across all inputs. This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension.
For example, if `concat_dim = 1` and the inputs are
sp_inputs[0]: shape = [2, 3] [0, 2]: "a" [1, 0]: "b" [1, 1]: "c"
sp_inputs[1]: shape = [2, 4] [0, 1]: "d" [0, 2]: "e"
then the output will be
shape = [2, 7] [0, 2]: "a" [0, 4]: "d" [0, 5]: "e" [1, 0]: "b" [1, 1]: "c"
Graphically this is equivalent to doing

    [    a] concat [  d e  ] = [    a   d e  ]
    [b c  ]        [       ]   [b c          ]
:: TensorType t | |
=> OpParams | |
-> Int64 | concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input |
-> [Tensor v'1 Int64] | indices: 2-D. Indices of each input |
-> [Tensor v'2 t] | values: 1-D. Non-empty values of each |
-> [Tensor v'3 Int64] | shapes: 1-D. Shapes of each |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (output_indices, output_values, output_shape)
|
:: MonadBuild m' | |
=> DataType | dtype: The type of the value being accumulated. |
-> Shape | shape: The shape of the values. |
-> m' (Tensor Ref ByteString) | handle: The handle to the accumulator. |
A conditional accumulator for aggregating sparse gradients. The accumulator
accepts gradients marked with local_step greater or equal to the most recent global_step known to the accumulator. The average can be extracted from the accumulator, provided sufficient gradients have been accumulated. Extracting the average automatically resets the aggregate to 0, and increments the global_step recorded by the accumulator.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | dtype: The type of the value being accumulated. |
-> Shape | shape: The shape of the values. |
-> m' (Tensor Ref ByteString) | handle: The handle to the accumulator. |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int64 | sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 t | dense: |
-> Tensor Build t | output: 1-D. The |
Adds up a SparseTensor and a dense Tensor, using these special rules:
- Broadcasts the dense side to have the same shape as the sparse side, if eligible;
- Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.
By these rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 t | dense: |
-> Tensor Build t | output: 1-D. The |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int64 | sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 t | dense: |
-> Tensor Build t | output: 1-D. The |
Component-wise divides a SparseTensor by a dense Tensor.
*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 t | dense: |
-> Tensor Build t | output: 1-D. The |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int64 | sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 t | dense: |
-> Tensor Build t | output: 1-D. The |
Component-wise multiplies a SparseTensor by a dense Tensor.
The output locations corresponding to the implicitly zero elements in the sparse tensor will be zero (i.e., will not take up storage space), regardless of the contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).
*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 t | dense: |
-> Tensor Build t | output: 1-D. The |
:: (OneOf `[Word16, Float]` ta, OneOf `[Word16, Float]` tb) | |
=> Tensor v'1 ta | a |
-> Tensor v'2 tb | b |
-> Tensor Build Float | product |
Multiply matrix "a" by matrix "b".
The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". This op is optimized for the case where at least one of "a" or "b" is sparse. The breakeven for using this versus a dense matrix multiply on one platform was 30% zero values in the sparse matrix.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int64 | input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | input_values: 1-D. |
-> Tensor v'3 Int64 | input_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int32 | reduction_axes: 1-D. Length- |
-> Tensor Build t | output: `R-K`-D. The reduced Tensor. |
Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense Tensor
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | input_values: 1-D. |
-> Tensor v'3 Int64 | input_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int32 | reduction_axes: 1-D. Length- |
-> Tensor Build t | output: `R-K`-D. The reduced Tensor. |
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int64 | input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | input_values: 1-D. |
-> Tensor v'3 Int64 | input_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int32 | reduction_axes: 1-D. Length- |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (output_indices, output_values, output_shape)
|
Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | input_values: 1-D. |
-> Tensor v'3 Int64 | input_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int32 | reduction_axes: 1-D. Length- |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (output_indices, output_values, output_shape)
|
:: TensorType t | |
=> Tensor v'1 Int64 | input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | input_values: 1-D. |
-> Tensor v'3 Int64 | input_shape: 1-D. Shape of the input SparseTensor. |
-> (Tensor Build Int64, Tensor Build t) | (output_indices, output_values)
|
Reorders a SparseTensor into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering along increasing dimension number. The only time ordering can be violated is during manual manipulation of the indices and values vectors to add entries.
Reordering does not affect the shape of the SparseTensor.
If the tensor has rank R
and N
non-empty values, input_indices
has
shape `[N, R]`, input_values has length N
, and input_shape has length R
.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 Int64 | input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering. |
-> Tensor v'2 t | input_values: 1-D. |
-> Tensor v'3 Int64 | input_shape: 1-D. Shape of the input SparseTensor. |
-> (Tensor Build Int64, Tensor Build t) | (output_indices, output_values)
|
:: Tensor v'1 Int64 | input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a SparseTensor. |
-> Tensor v'2 Int64 | input_shape: 1-D. |
-> Tensor v'3 Int64 | new_shape: 1-D. |
-> (Tensor Build Int64, Tensor Build Int64) | (output_indices, output_shape)
|
Reshapes a SparseTensor to represent values in a new dense shape.
This operation has the same semantics as reshape on the represented dense
tensor. The input_indices
are recomputed based on the requested new_shape
.
If one component of new_shape
is the special value -1, the size of that
dimension is computed so that the total dense size remains constant. At
most one component of new_shape
can be -1. The number of dense elements
implied by new_shape
must be the same as the number of dense elements
originally implied by input_shape
.
Reshaping does not affect the order of values in the SparseTensor.
If the input tensor has rank R_in
and N
non-empty values, and new_shape
has length R_out
, then input_indices
has shape `[N, R_in]`,
input_shape
has length R_in
, output_indices
has shape `[N, R_out]`, and
output_shape
has length R_out
.
:: OpParams | |
-> Tensor v'1 Int64 | input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a SparseTensor. |
-> Tensor v'2 Int64 | input_shape: 1-D. |
-> Tensor v'3 Int64 | new_shape: 1-D. |
-> (Tensor Build Int64, Tensor Build Int64) | (output_indices, output_shape)
|
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tidx | indices: A 1-D tensor. Has same rank as |
-> Tensor v'3 Int32 | segment_ids: A 1-D tensor. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the mean along sparse segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Like SegmentMean
, but segment_ids
can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by indices
.
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tidx | indices: A 1-D tensor. Has same rank as |
-> Tensor v'3 Int32 | segment_ids: A 1-D tensor. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | grad: gradient propagated to the SparseSegmentMean op. |
-> Tensor v'2 tidx | indices: indices passed to the corresponding SparseSegmentMean op. |
-> Tensor v'3 Int32 | segment_ids: segment_ids passed to the corresponding SparseSegmentMean op. |
-> Tensor v'4 Int32 | output_dim0: dimension 0 of "data" passed to SparseSegmentMean op. |
-> Tensor Build t | output |
Computes gradients for SparseSegmentMean.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Tensor v'1 t | grad: gradient propagated to the SparseSegmentMean op. |
-> Tensor v'2 tidx | indices: indices passed to the corresponding SparseSegmentMean op. |
-> Tensor v'3 Int32 | segment_ids: segment_ids passed to the corresponding SparseSegmentMean op. |
-> Tensor v'4 Int32 | output_dim0: dimension 0 of "data" passed to SparseSegmentMean op. |
-> Tensor Build t | output |
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tidx | indices: A 1-D tensor. Has same rank as |
-> Tensor v'3 Int32 | segment_ids: A 1-D tensor. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the sum along sparse segments of a tensor divided by the sqrt of N.
N is the size of the segment being reduced.
Read the section on Segmentation for an explanation of segments.
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tidx | indices: A 1-D tensor. Has same rank as |
-> Tensor v'3 Int32 | segment_ids: A 1-D tensor. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | grad: gradient propagated to the SparseSegmentSqrtN op. |
-> Tensor v'2 tidx | indices: indices passed to the corresponding SparseSegmentSqrtN op. |
-> Tensor v'3 Int32 | segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op. |
-> Tensor v'4 Int32 | output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op. |
-> Tensor Build t | output |
Computes gradients for SparseSegmentSqrtN.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.
:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Tensor v'1 t | grad: gradient propagated to the SparseSegmentSqrtN op. |
-> Tensor v'2 tidx | indices: indices passed to the corresponding SparseSegmentSqrtN op. |
-> Tensor v'3 Int32 | segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op. |
-> Tensor v'4 Int32 | output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op. |
-> Tensor Build t | output |
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tidx | indices: A 1-D tensor. Has same rank as |
-> Tensor v'3 Int32 | segment_ids: A 1-D tensor. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
Computes the sum along sparse segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Like SegmentSum
, but segment_ids
can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by indices
.
For example:
```prettyprint c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
# Select two rows, one segment. tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) ==> [[0 0 0 0]]
# Select two rows, two segments. tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) ==> [[ 1 2 3 4] [-1 -2 -3 -4]]
# Select all rows, two segments. tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) ==> [[0 0 0 0] [5 6 7 8]]
# Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) ```
:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tidx | indices: A 1-D tensor. Has same rank as |
-> Tensor v'3 Int32 | segment_ids: A 1-D tensor. Values should be sorted and can be repeated. |
-> Tensor Build t | output: Has same shape as data, except for dimension 0 which
has size |
:: OneOf `[Double, Float]` t | |
=> Tensor v'1 Int64 | sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor Build t | output: 1-D. The |
Applies softmax to a batched N-D SparseTensor
.
The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost logical submatrix with shape `[B, C]`, but with the catch that *the implicitly zero elements do not participate*. Specifically, the algorithm is equivalent to the following:
- Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension;
- Masks out the original implicitly-zero locations;
- Renormalizes the remaining elements.
Hence, the SparseTensor
result has exactly the same non-zero indices and
shape.
:: OneOf `[Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering. |
-> Tensor v'2 t | sp_values: 1-D. |
-> Tensor v'3 Int64 | sp_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor Build t | output: 1-D. The |
sparseSoftmaxCrossEntropyWithLogits
:: (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) | |
=> Tensor v'1 t | features: batch_size x num_classes matrix |
-> Tensor v'2 tlabels | labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry. |
-> (Tensor Build t, Tensor Build t) | (loss, backprop)
|
Computes softmax cross entropy cost and gradients to backpropagate.
Unlike SoftmaxCrossEntropyWithLogits
, this operation does not accept
a matrix of label probabilities, but rather a single label per row
of features. This label is considered to have probability 1.0 for the
given row.
Inputs are the logits, not probabilities.
sparseSoftmaxCrossEntropyWithLogits'
:: (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) | |
=> OpParams | |
-> Tensor v'1 t | features: batch_size x num_classes matrix |
-> Tensor v'2 tlabels | labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry. |
-> (Tensor Build t, Tensor Build t) | (loss, backprop)
|
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int64 | a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering. |
-> Tensor v'2 t | a_values: 1-D. |
-> Tensor v'3 Int64 | a_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int64 | b_indices: counterpart to |
-> Tensor v'5 t | b_values: counterpart to |
-> Tensor v'6 Int64 | b_shape: counterpart to |
-> (Tensor Build Int64, Tensor Build t) | (output_indices, output_values)
|
Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering. |
-> Tensor v'2 t | a_values: 1-D. |
-> Tensor v'3 Int64 | a_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int64 | b_indices: counterpart to |
-> Tensor v'5 t | b_values: counterpart to |
-> Tensor v'6 Int64 | b_shape: counterpart to |
-> (Tensor Build Int64, Tensor Build t) | (output_indices, output_values)
|
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 Int64 | a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering. |
-> Tensor v'2 t | a_values: 1-D. |
-> Tensor v'3 Int64 | a_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int64 | b_indices: counterpart to |
-> Tensor v'5 t | b_values: counterpart to |
-> Tensor v'6 Int64 | b_shape: counterpart to |
-> (Tensor Build Int64, Tensor Build t) | (output_indices, output_values)
|
Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering. |
-> Tensor v'2 t | a_values: 1-D. |
-> Tensor v'3 Int64 | a_shape: 1-D. Shape of the input SparseTensor. |
-> Tensor v'4 Int64 | b_indices: counterpart to |
-> Tensor v'5 t | b_values: counterpart to |
-> Tensor v'6 Int64 | b_shape: counterpart to |
-> (Tensor Build Int64, Tensor Build t) | (output_indices, output_values)
|
:: TensorType t | |
=> Int64 | num_split: The number of ways to split. |
-> Tensor v'1 Int64 | split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(shape))`. |
-> Tensor v'2 Int64 | indices: 2-D tensor represents the indices of the sparse tensor. |
-> Tensor v'3 t | values: 1-D tensor represents the values of the sparse tensor. |
-> Tensor v'4 Int64 | shape: 1-D. tensor represents the shape of the sparse tensor. output indices: A list of 1-D tensors represents the indices of the output sparse tensors. |
-> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) | (output_indices, output_values, output_shape)
|
Split a SparseTensor
into num_split
tensors along one dimension.
If `shape[split_dim]` is not an integer multiple of num_split
, the slices
`[0 : shape[split_dim] % num_split]` get one extra dimension.
For example, if `split_dim = 1` and `num_split = 2` and the input is
input_tensor = shape = [2, 7] [ a d e ] [b c ]
Graphically the output tensors are:
output_tensor[0] = shape = [2, 4] [ a ] [b c ]
output_tensor[1] = shape = [2, 3] [ d e ] [ ]
:: TensorType t | |
=> OpParams | |
-> Int64 | num_split: The number of ways to split. |
-> Tensor v'1 Int64 | split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(shape))`. |
-> Tensor v'2 Int64 | indices: 2-D tensor represents the indices of the sparse tensor. |
-> Tensor v'3 t | values: 1-D tensor represents the values of the sparse tensor. |
-> Tensor v'4 Int64 | shape: 1-D. tensor represents the shape of the sparse tensor. output indices: A list of 1-D tensors represents the indices of the output sparse tensors. |
-> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) | (output_indices, output_values, output_shape)
|
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 tindices | a_indices: 2-D. The |
-> Tensor v'2 t | a_values: 1-D. The |
-> Tensor v'3 tindices | a_shape: 1-D. The |
-> Tensor v'4 t | b: |
-> Tensor Build t | output |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 tindices | a_indices: 2-D. The |
-> Tensor v'2 t | a_values: 1-D. The |
-> Tensor v'3 tindices | a_shape: 1-D. The |
-> Tensor v'4 t | b: |
-> Tensor Build t | output |
:: TensorType t | |
=> Tensor v'1 Int64 | a_indices: 2-D. The |
-> Tensor v'2 t | a_values: 1-D. The |
-> Tensor v'3 Int64 | a_shape: 1-D. The |
-> Tensor v'4 t | b: 2-D. A dense Matrix. |
-> Tensor Build t | product |
Multiply SparseTensor (of rank 2) A by dense matrix B.
No validity checking is performed on the indices of A. However, the following input format is recommended for optimal behavior:
if adjoint_a == false: A should be sorted in lexicographically increasing order. Use SparseReorder if you're not sure. if adjoint_a == true: A should be sorted in order of increasing dimension 1 (i.e., "column major" order instead of "row major" order).
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 Int64 | a_indices: 2-D. The |
-> Tensor v'2 t | a_values: 1-D. The |
-> Tensor v'3 Int64 | a_shape: 1-D. The |
-> Tensor v'4 t | b: 2-D. A dense Matrix. |
-> Tensor Build t | product |
:: (TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 tindices | sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. |
-> Tensor v'2 tindices | output_shape: 1-D. Shape of the dense output tensor. |
-> Tensor v'3 t | sparse_values: 1-D. Values corresponding to each row of |
-> Tensor v'4 t | default_value: Scalar value to set for indices not specified in
|
-> Tensor Build t | dense: Dense output tensor of shape |
Converts a sparse representation into a dense tensor.
Builds an array dense
with shape output_shape
such that
```prettyprint # If sparse_indices is scalar dense[i] = (i == sparse_indices ? sparse_values : default_value)
# If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i]
# If sparse_indices is an n by d matrix, then for each i in [0, n) dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] ```
All other values in dense
are set to default_value
. If sparse_values
is a
scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If validate_indices
is true, these properties
are checked during execution.
:: (TensorType t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 tindices | sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. |
-> Tensor v'2 tindices | output_shape: 1-D. Shape of the dense output tensor. |
-> Tensor v'3 t | sparse_values: 1-D. Values corresponding to each row of |
-> Tensor v'4 t | default_value: Scalar value to set for indices not specified in
|
-> Tensor Build t | dense: Dense output tensor of shape |
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> Tensor v'1 Int64 | set1_indices: 2D |
-> Tensor v'2 t | set1_values: 1D |
-> Tensor v'3 Int64 | set1_shape: 1D |
-> Tensor v'4 Int64 | set2_indices: 2D |
-> Tensor v'5 t | set2_values: 1D |
-> Tensor v'6 Int64 | set2_shape: 1D |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (result_indices, result_values, result_shape)
|
Applies set operation along last dimension of 2 SparseTensor
inputs.
See SetOperationOp::SetOperationFromContext for values of set_operation
.
If validate_indices
is True
, SparseToSparseSetOperation
validates the
order and range of set1
and set2
indices.
Input set1
is a SparseTensor
represented by set1_indices
, set1_values
,
and set1_shape
. For set1
ranked n
, 1st `n-1` dimensions must be the same
as set2
. Dimension n
contains values in a set, duplicates are allowed but
ignored.
Input set2
is a SparseTensor
represented by set2_indices
, set2_values
,
and set2_shape
. For set2
ranked n
, 1st `n-1` dimensions must be the same
as set1
. Dimension n
contains values in a set, duplicates are allowed but
ignored.
If validate_indices
is True
, this op validates the order and range of set1
and set2
indices.
Output result
is a SparseTensor
represented by result_indices
,
result_values
, and result_shape
. For set1
and set2
ranked n
, this
has rank n
and the same 1st `n-1` dimensions as set1
and set2
. The nth
dimension contains the result of set_operation
applied to the corresponding
`[0...n-1]` dimension of set
.
:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t | |
=> OpParams | |
-> Tensor v'1 Int64 | set1_indices: 2D |
-> Tensor v'2 t | set1_values: 1D |
-> Tensor v'3 Int64 | set1_shape: 1D |
-> Tensor v'4 Int64 | set2_indices: 2D |
-> Tensor v'5 t | set2_values: 1D |
-> Tensor v'6 Int64 | set2_shape: 1D |
-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) | (result_indices, result_values, result_shape)
|
:: TensorType t | |
=> Int64 | num_split: The number of ways to split. Must evenly divide `value.shape[split_dim]`. |
-> Tensor v'1 Int32 | split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`. |
-> Tensor v'2 t | value: The tensor to split. |
-> [Tensor Build t] | output: They are identically shaped tensors, whose shape matches that of |
Splits a tensor into num_split
tensors along one dimension.
:: TensorType t | |
=> OpParams | |
-> Int64 | num_split: The number of ways to split. Must evenly divide `value.shape[split_dim]`. |
-> Tensor v'1 Int32 | split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`. |
-> Tensor v'2 t | value: The tensor to split. |
-> [Tensor Build t] | output: They are identically shaped tensors, whose shape matches that of |
:: (TensorType t, OneOf `[Int32, Int64]` tlen) | |
=> Int64 | num_split |
-> Tensor v'1 t | value: The tensor to split. |
-> Tensor v'2 tlen | size_splits: list containing the sizes of each output tensor along the split dimension. Must sum to the dimension of value along split_dim. Can contain one -1 indicating that dimension is to be inferred. |
-> Tensor v'3 Int32 | split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`. |
-> [Tensor Build t] | output: Tensors whose shape matches that of |
Splits a tensor into num_split
tensors along one dimension.
:: (TensorType t, OneOf `[Int32, Int64]` tlen) | |
=> OpParams | |
-> Int64 | num_split |
-> Tensor v'1 t | value: The tensor to split. |
-> Tensor v'2 tlen | size_splits: list containing the sizes of each output tensor along the split dimension. Must sum to the dimension of value along split_dim. Can contain one -1 indicating that dimension is to be inferred. |
-> Tensor v'3 Int32 | split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`. |
-> [Tensor Build t] | output: Tensors whose shape matches that of |
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes square root of x element-wise.
I.e., \(y = sqrt{x} = x^{1/2}\).
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Computes the gradient for the sqrt of x
wrt its input.
Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and dy
is the corresponding input gradient.
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes square of x element-wise.
I.e., \(y = x * x = x^2\).
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns (x - y)(x - y) element-wise.
- NOTE*:
SquaredDifference
supports broadcasting. More about broadcasting here
:: TensorType t | |
=> Tensor v'1 t | input: The |
-> Tensor Build t | output: Contains the same data as |
Removes dimensions of size 1 from the shape of a tensor.
Given a tensor input
, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
squeeze_dims
.
For example:
```prettyprint
# t
is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```
Or, to remove specific size 1 dimensions:
```prettyprint
# t
is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input: The |
-> Tensor Build t | output: Contains the same data as |
:: MonadBuild m' | |
=> DataType | elem_type: The type of the elements on the stack. |
-> m' (Tensor Ref ByteString) | handle: The handle to the stack. |
A stack that produces elements in first-in last-out order.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | elem_type: The type of the elements on the stack. |
-> m' (Tensor Ref ByteString) | handle: The handle to the stack. |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle: The handle to a stack. |
-> m' ControlNode |
Delete the stack from its resource container.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a stack. |
-> m' ControlNode |
:: (MonadBuild m', TensorType elem_type) | |
=> Tensor Ref ByteString | handle: The handle to a stack. |
-> m' (Tensor Value elem_type) | elem: The tensor that is popped from the top of the stack. |
Pop the element at the top of the stack.
:: (MonadBuild m', TensorType elem_type) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a stack. |
-> m' (Tensor Value elem_type) | elem: The tensor that is popped from the top of the stack. |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref ByteString | handle: The handle to a stack. |
-> Tensor v'2 t | elem: The tensor to be pushed onto the stack. |
-> m' (Tensor Value t) | output: The same tensor as the input |
Push an element onto the stack.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref ByteString | handle: The handle to a stack. |
-> Tensor v'2 t | elem: The tensor to be pushed onto the stack. |
-> m' (Tensor Value t) | output: The same tensor as the input |
:: (MonadBuild m', TensorTypes dtypes) | |
=> TensorList v'1 dtypes | values: a list of tensors |
-> m' ControlNode |
Stage values similar to a lightweight Enqueue. The basic functionality of this
Op is similar to a queue with many fewer capabilities and options. This Op is optimized for performance.
:: (MonadBuild m', TensorTypes dtypes) | |
=> OpParams | |
-> TensorList v'1 dtypes | values: a list of tensors |
-> m' ControlNode |
:: TensorType t | |
=> Tensor v'1 t | input |
-> Tensor Build t | output |
Stops gradient computation.
When executed in a graph, this op outputs its input tensor as-is.
When building ops to compute gradients, this op prevents the contribution of
its inputs to be taken into account. Normally, the gradient generator adds ops
to a graph to compute the derivatives of a specified loss
by recursively
finding out inputs that contributed to its computation. If you insert this op
in the graph, its inputs are masked from the gradient generator. They are not
taken into account for computing gradients.
This is useful any time you want to compute a value with TensorFlow but need to pretend that the value was a constant. Some examples include:
- The *EM* algorithm where the *M-step* should not involve backpropagation through the output of the *E-step*.
- Contrastive divergence training of Boltzmann machines where, when differentiating the energy function, the training must not backpropagate through the graph that generated the samples from the model.
- Adversarial training, where no backprop should happen through the adversarial example generation process.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor Build t | output |
:: (TensorType t, OneOf `[Int32, Int64]` index) | |
=> Tensor v'1 t | input |
-> Tensor v'2 index | begin: `begin[k]` specifies the offset into the |
-> Tensor v'3 index | end: `end[i]` is like |
-> Tensor v'4 index | strides: `strides[i]` specifies the increment in the |
-> Tensor Build t | output |
Return a strided slice from input
.
Note, most Python users will want to use the Python `Tensor.__getitem__`
or `Variable.__getitem__`
rather than this op directly.
The goal of this op is to produce a new tensor with a subset of
the elements from the n
dimensional input
tensor. The subset is chosen using
a sequence of m
sparse range specifications encoded into the arguments
of this function. Note, in some cases
m
could be equal to n
, but this need not be the case. Each
range specification entry can be one of the following:
- An ellipsis (...). Ellipses are used to imply zero or more
dimensions of full-dimension selection and are produced using
ellipsis_mask
. For example, `foo[...]` is the identity slice. - A new axis. This is used to insert a new shape=1 dimension and is
produced using
new_axis_mask
. For example, `foo[:, ...]` where `foo`
is shape `(3, 4)` produces a `(1, 3, 4)` tensor. - A range `begin:end:stride`. This is used to specify how much to choose from
a given dimension.
`stride` can be any integer but 0. `begin` is an integer which represents the index of the first value to select, while `end` represents the index of the last value to select. The number of values selected in each dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. `begin` and `end` can be negative, where `-1` is the last element and `-2` is the second to last. `begin_mask` controls whether to replace the explicitly given `begin` with an implicit effective value of `0` if `stride > 0` and `-1` if `stride < 0`. `end_mask` is analogous but produces the number required to create the largest open interval. For example, given a shape `(3,)` tensor, the effective `begin` and `end` of `foo[:]` are `0` and `3`. Do not assume this is equivalent to `foo[0:-1]`, which has an effective `begin` and `end` of `0` and `2`. Another example is `foo[-2::-1]`, which reverses the first dimension of a tensor while dropping the last two (in the original order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. - A single index. This is used to keep only elements that have a given
index. For example, `foo[2, :]` on a shape `(5, 6)` tensor produces a
shape `(6,)` tensor. This is encoded in
`begin`
, `end`
, and `shrink_axis_mask`
.
Each conceptual range specification is encoded in the op's argument. This encoding is best understand by considering a non-trivial example. In particular, `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
```prettyprint begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) end = [2, 4, x, x, -3, x] strides = [1, 1, x, x, -1, 1] begin_mask = 1<<4 | 1 << 5 = 48 end_mask = 1<<5 = 32 ellipsis_mask = 1<<3 = 8 new_axis_mask = 1<<2 = 4 shrink_axis_mask = 1<<0 ```
In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of the slice becomes (2, 1, 5, 5, 2, 5). Let us walk step by step through each argument specification.
- The first argument in the example slice is turned into `begin = 1` and
`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
also set the appropriate bit in
shrink_axis_mask
. - `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have zero bits contributed.
- None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 dimension in the final shape. Dummy values are contributed to begin, end and stride, while the new_axis_mask bit is set.
- `...`
grabs the full ranges from as many dimensions as needed to fully specify a slice for every dimension of the input shape. - `:-3:-1` shows the use of negative indices. A negative index
`i`
associated with a dimension that has shape `s`
is converted to a positive index `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion is done internally, so begin, end and strides receive x, -3, and -1. The appropriate begin_mask bit is set to indicate the start range is the full range (ignoring the x). - `:`
indicates that the entire contents of the corresponding dimension is selected. This is equivalent to `::` or `0::1`. begin, end, and strides receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask`
and `end_mask`
are also set.
- Requirements*: `0 != strides[i] for i in [0, m)` `ellipsis_mask must be a power of two (only one ellipsis)`
:: (TensorType t, OneOf `[Int32, Int64]` index) | |
=> OpParams | |
-> Tensor v'1 t | input |
-> Tensor v'2 index | begin: `begin[k]` specifies the offset into the |
-> Tensor v'3 index | end: `end[i]` is like |
-> Tensor v'4 index | strides: `strides[i]` specifies the increment in the |
-> Tensor Build t | output |
:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` index) | |
=> Tensor Ref t | ref |
-> Tensor v'2 index | begin |
-> Tensor v'3 index | end |
-> Tensor v'4 index | strides |
-> Tensor v'5 t | value |
-> m' (Tensor Ref t) | output_ref |
Assign value
to the sliced l-value reference of ref
.
The values of value
are assigned to the positions in the variable
ref
that are selected by the slice parameters. The slice parameters
`begin`
, `end`
, `strides`, etc. work exactly as in StridedSlice
.
NOTE this op currently does not support broadcasting and so value
's
shape must be exactly the shape produced by the slice of ref
.
:: (TensorType t, OneOf `[Int32, Int64]` index) | |
=> Tensor v'1 index | shape |
-> Tensor v'2 index | begin |
-> Tensor v'3 index | end |
-> Tensor v'4 index | strides |
-> Tensor v'5 t | dy |
-> Tensor Build t | output |
Returns the gradient of StridedSlice
.
Since StridedSlice
cuts out pieces of its input
which is of size
shape
, its gradient will have the same shape (which is passed here
as shape
). The gradient will be zero in any element that the slice
does not select.
Arguments are the same as StridedSliceGrad with the exception that
dy
is the input gradient to be propagated and shape
is the
shape of StridedSlice
's input
.
:: [Tensor v'1 ByteString] | inputs: A list of string tensors. The tensors must all have the same shape, or be scalars. Scalars may be mixed in; these will be broadcast to the shape of non-scalar inputs. |
-> Tensor Build ByteString | output |
Joins the strings in the given list of string tensors into one tensor;
with the given separator (default is an empty separator).
:: OpParams | |
-> [Tensor v'1 ByteString] | inputs: A list of string tensors. The tensors must all have the same shape, or be scalars. Scalars may be mixed in; these will be broadcast to the shape of non-scalar inputs. |
-> Tensor Build ByteString | output |
:: Tensor v'1 ByteString | input: 1-D. Strings to split. |
-> Tensor v'2 ByteString | delimiter: 0-D. Delimiter characters (bytes), or empty string. |
-> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) | (indices, values, shape)
|
Split elements of input
based on delimiter
into a SparseTensor
.
Let N be the size of source (typically N will be the batch size). Split each
element of input
based on delimiter
and return a SparseTensor
containing the split tokens. Empty tokens are ignored.
delimiter
can be empty, or a string of split characters. If delimiter
is an
empty string, each element of input
is split into individual single-byte
character strings, including splitting of UTF-8 multibyte sequences. Otherwise
every character of delimiter
is a potential split point.
For example: N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output will be
indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
shape = [2, 3]
values = [hello
, world
, a
, b
, c
]
:: OpParams | |
-> Tensor v'1 ByteString | input: 1-D. Strings to split. |
-> Tensor v'2 ByteString | delimiter: 0-D. Delimiter characters (bytes), or empty string. |
-> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) | (indices, values, shape)
|
:: Int64 | num_buckets: The number of buckets. |
-> Tensor v'1 ByteString | string_tensor |
-> Tensor Build Int64 | output: A Tensor of the same shape as the input |
Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the process.
Note that the hash function may change from time to time. This functionality will be deprecated and it's recommended to use `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
:: Int64 | num_buckets: The number of buckets. |
-> Tensor v'1 ByteString | input: The strings to assign a hash bucket. |
-> Tensor Build Int64 | output: A Tensor of the same shape as the input |
Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the process and will never change. However, it is not suitable for cryptography. This function may be used when CPU time is scarce and inputs are trusted or unimportant. There is a risk of adversaries constructing inputs that all hash to the same bucket. To prevent this problem, use a strong hash function with `tf.string_to_hash_bucket_strong`.
:: Int64 | num_buckets: The number of buckets. |
-> Tensor v'1 ByteString | input: The strings to assign a hash bucket. |
-> Tensor Build Int64 | output: A Tensor of the same shape as the input |
Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the
process. The hash function is a keyed hash function, where attribute key
defines the key of the hash function. key
is an array of 2 elements.
A strong hash is important when inputs may be malicious, e.g. URLs with additional components. Adversaries could try to make their inputs hash to the same bucket for a denial-of-service attack or to skew the results. A strong hash prevents this by making it difficult, if not infeasible, to compute inputs that hash to the same bucket. This comes at a cost of roughly 4x higher compute time than `tf.string_to_hash_bucket_fast`.
:: OneOf `[Int32, Float]` out_type | |
=> Tensor v'1 ByteString | string_tensor |
-> Tensor Build out_type | output: A Tensor of the same shape as the input |
Converts each string in the input Tensor to the specified numeric type.
(Note that int32 overflow results in an error while float overflow results in a rounded value.)
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns x - y element-wise.
- NOTE*:
Sub
supports broadcasting. More about broadcasting here
:: OneOf `[Int32, Int64]` t | |
=> Tensor v'1 ByteString | input: Tensor of strings |
-> Tensor v'2 t | pos: Scalar defining the position of first character in each substring |
-> Tensor v'3 t | len: Scalar defining the number of characters to include in each substring |
-> Tensor Build ByteString | output: Tensor of substrings |
Return substrings from Tensor
of strings.
For each string in the input Tensor
, creates a substring starting at index
pos
with a total length of len
.
If len
defines a substring that would extend beyond the length of the input
string, then as many characters as possible are used.
If pos
is negative or specifies a character index larger than any of the input
strings, then an InvalidArgumentError
is thrown.
pos
and len
must have the same shape, otherwise a ValueError
is thrown on
Op creation.
- NOTE*:
Substr
supports broadcasting up to two dimensions. More about broadcasting here.
Examples
Using scalar pos
and len
:
```
input = [bHello
, bWorld
]
position = 1
length = 3
output = [bell
, borl
]
```
Using pos
and len
with same shape as input
:
```
input = [[bten
, beleven
, btwelve
],
[bthirteen
, bfourteen
, bfifteen
],
[bsixteen
, bseventeen
, beighteen
]]
position = [[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]
length = [[2, 3, 4],
[4, 3, 2],
[5, 5, 5]]
output = [[ben
, beve
, blve
],
[bhirt
, burt
, bte
],
[bixtee
, bvente
, bhteen
]]
```
Broadcasting pos
and len
onto input
:
```
input = [[bten
, beleven
, btwelve
],
[bthirteen
, bfourteen
, bfifteen
],
[bsixteen
, bseventeen
, beighteen
],
[bnineteen
, btwenty
, btwentyone
]]
position = [1, 2, 3]
length = [1, 2, 3]
output = [[be
, bev
, blve
],
[bh
, bur
, btee
],
[bi
, bve
, bhte
],
[bi
, ben
, bnty
]]
```
Broadcasting input
onto pos
and len
:
```
input = bthirteen
position = [1, 5, 7]
length = [3, 2, 1]
output = [bhir
, bee
, bn
]
```
:: OneOf `[Int32, Int64]` t | |
=> OpParams | |
-> Tensor v'1 ByteString | input: Tensor of strings |
-> Tensor v'2 t | pos: Scalar defining the position of first character in each substring |
-> Tensor v'3 t | len: Scalar defining the number of characters to include in each substring |
-> Tensor Build ByteString | output: Tensor of substrings |
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) | |
=> Tensor v'1 t | input: The tensor to reduce. |
-> Tensor v'2 tidx | reduction_indices: The dimensions to reduce. |
-> Tensor Build t | output: The reduced tensor. |
Computes the sum of elements across dimensions of a tensor.
Reduces input
along the dimensions given in reduction_indices
. Unless
keep_dims
is true, the rank of the tensor is reduced by 1 for each entry in
reduction_indices
. If keep_dims
is true, the reduced dimensions are
retained with length 1.
:: OneOf `[Complex Double, Complex Float, Double, Float]` t | |
=> Tensor v'1 t | input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Let |
-> (Tensor Build t, Tensor Build t, Tensor Build t) | (s, u, v)
|
Computes the singular value decompositions of one or more matrices.
Computes the SVD of each inner matrix in input
such that
`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
```prettyprint # a is a tensor containing a batch of matrices. # s is a tensor of singular values for each matrix. # u is the tensor containing of left singular vectors for each matrix. # v is the tensor containing of right singular vectors for each matrix. s, u, v = svd(a) s, _, _ = svd(a, compute_uv=False) ```
:: OneOf `[Complex Double, Complex Float, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Let |
-> (Tensor Build t, Tensor Build t, Tensor Build t) | (s, u, v)
|
:: MonadBuild m' | |
=> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
A Reader that outputs the records from a TensorFlow Records file.
:: MonadBuild m' | |
=> OpParams | |
-> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
:: MonadBuild m' | |
=> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
A Reader that outputs the records from a TensorFlow Records file.
:: MonadBuild m' | |
=> OpParams | |
-> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
:: (MonadBuild m', TensorType dtype) | |
=> Tensor v'1 Int64 | sparse_handles: 1-D, The |
-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64) | (sparse_indices, sparse_values, sparse_shape)
|
Read SparseTensors
from a SparseTensorsMap
and concatenate them.
The input sparse_handles
must be an int64
matrix of shape `[N, 1]` where
N
is the minibatch size and the rows correspond to the output handles of
AddSparseToTensorsMap
or AddManySparseToTensorsMap
. The ranks of the
original SparseTensor
objects that went into the given input ops must all
match. When the final SparseTensor
is created, it has rank one
higher than the ranks of the incoming SparseTensor
objects
(they have been concatenated along a new row dimension on the left).
The output SparseTensor
object's shape values for all dimensions but the
first are the max across the input SparseTensor
objects' shape values
for the corresponding dimensions. Its first shape value is N
, the minibatch
size.
The input SparseTensor
objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run SparseReorder
to restore index ordering.
For example, if the handles represent an input, which is a `[2, 3]` matrix
representing two original SparseTensor
objects:
``` index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] ```
and
``` index = [ 2] [10] values = [4, 5] shape = [30] ```
then the final SparseTensor
will be:
``` index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] ```
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Tensor v'1 Int64 | sparse_handles: 1-D, The |
-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64) | (sparse_indices, sparse_values, sparse_shape)
|
:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes tan of x element-wise.
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor Build t | y |
Computes hyperbolic tangent of x
element-wise.
:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Computes the gradient for the tanh of x
wrt its input.
Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and dy
is the corresponding input gradient.
:: (MonadBuild m', TensorType dtype) | |
=> Shape | shape: The shape of the variable tensor. |
-> m' (Tensor Ref dtype) | ref: A reference to the variable tensor. |
Returns a tensor that may be mutated, but only persists within a single step.
This is an experimental op for internal use only and it is possible to use this op in unsafe ways. DO NOT USE unless you fully understand the risks.
It is the caller's responsibility to ensure that ref
is eventually passed to a
matching DestroyTemporaryVariable
op after all other uses have completed.
Outputs a ref to the tensor state so it may be read or modified.
E.g. var = state_ops._temporary_variable([1, 2], types.float_) var_name = var.op.name var = state_ops.assign(var, [[4.0, 5.0]]) var = state_ops.assign_add(var, [[6.0, 7.0]]) final = state_ops._destroy_temporary_variable(var, var_name=var_name)
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Shape | shape: The shape of the variable tensor. |
-> m' (Tensor Ref dtype) | ref: A reference to the variable tensor. |
:: MonadBuild m' | |
=> DataType | dtype |
-> Tensor v'1 Int32 | size |
-> m' (Tensor Ref ByteString) | handle |
:: MonadBuild m' | |
=> OpParams | |
-> DataType | dtype |
-> Tensor v'1 Int32 | size |
-> m' (Tensor Ref ByteString) | handle |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle |
-> m' ControlNode |
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> m' ControlNode |
:: MonadBuild m' | |
=> Tensor v'1 ByteString | handle |
-> m' ControlNode |
Deprecated. Use TensorArrayCloseV3
:: MonadBuild m' | |
=> OpParams | |
-> Tensor v'1 ByteString | handle |
-> m' ControlNode |
:: MonadBuild m' | |
=> ResourceHandle | handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). |
-> m' ControlNode |
Delete the TensorArray from its resource container. This enables
the user to close and release the resource in the middle of a step/run.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). |
-> m' ControlNode |
:: (MonadBuild m', TensorType dtype) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value dtype, Tensor Value Int64) | (value, lengths)
|
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value dtype, Tensor Value Int64) | (value, lengths)
|
:: TensorType dtype | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> (Tensor Build dtype, Tensor Build Int64) | (value, lengths)
|
Deprecated. Use TensorArrayConcatV3
:: (MonadBuild m', TensorType dtype) | |
=> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value dtype, Tensor Value Int64) | (value, lengths)
|
Concat the elements from the TensorArray into value value
.
Takes T
elements of shapes
``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) ```
and concatenates them into a Tensor of shape:
```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
All elements must have the same shape (excepting the first dimension).
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value dtype, Tensor Value Int64) | (value, lengths)
|
:: (MonadBuild m', TensorType dtype) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | indices |
-> Tensor v'3 Float | flow_in |
-> m' (Tensor Value dtype) | value |
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | indices |
-> Tensor v'3 Float | flow_in |
-> m' (Tensor Value dtype) | value |
:: TensorType dtype | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 Int32 | indices |
-> Tensor v'3 Float | flow_in |
-> Tensor Build dtype | value |
Deprecated. Use TensorArrayGatherV3
:: TensorType dtype | |
=> OpParams | |
-> Tensor v'1 ByteString | handle |
-> Tensor v'2 Int32 | indices |
-> Tensor v'3 Float | flow_in |
-> Tensor Build dtype | value |
:: (MonadBuild m', TensorType dtype) | |
=> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | indices: The locations in the TensorArray from which to read tensor elements. |
-> Tensor v'3 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value dtype) | value: All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0). |
Gather specific elements from the TensorArray into output value
.
All elements selected by indices
must have the same shape.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | indices: The locations in the TensorArray from which to read tensor elements. |
-> Tensor v'3 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value dtype) | value: All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0). |
:: MonadBuild m' | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Ref ByteString) | grad_handle |
:: MonadBuild m' | |
=> OpParams | |
-> Tensor v'1 ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Ref ByteString) | grad_handle |
:: MonadBuild m' | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value ByteString) | grad_handle |
Deprecated. Use TensorArrayGradV3
:: MonadBuild m' | |
=> OpParams | |
-> Tensor v'1 ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value ByteString) | grad_handle |
:: MonadBuild m' | |
=> ResourceHandle | handle: The handle to the forward TensorArray. |
-> Tensor v'2 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (ResourceHandle, Tensor Value Float) | (grad_handle, flow_out)
|
Creates a TensorArray for storing the gradients of values in the given handle.
If the given TensorArray gradient already exists, returns a reference to it.
Locks the size of the original TensorArray by disabling its dynamic size flag.
*A note about the input flow_in:*
The handle flow_in forces the execution of the gradient lookup to occur only after certain other operations have occurred. For example, when the forward TensorArray is dynamically sized, writes to this TensorArray may resize the object. The gradient TensorArray is statically sized based on the size of the forward TensorArray when this operation executes. Furthermore, the size of the forward TensorArray is frozen by this call. As a result, the flow is used to ensure that the call to generate the gradient TensorArray only happens after all writes are executed.
In the case of dynamically sized TensorArrays, gradient computation should only be performed on read operations that have themselves been chained via flow to occur only after all writes have executed. That way the final size of the forward TensorArray is known when this operation is called.
*A note about the source attribute:*
TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counts and generally breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular
TensorArray gradient is being called in. This is performed by identifying
a unique string (e.g. "gradients", "gradients_1", ...) from the input
gradient Tensor's name. This string is used as a suffix when creating
the TensorArray gradient object here (the attribute source
).
The attribute source
is added as a suffix to the forward TensorArray's
name when performing the creation / lookup, so that each separate gradient
calculation gets its own TensorArray accumulator.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | handle: The handle to the forward TensorArray. |
-> Tensor v'2 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (ResourceHandle, Tensor Value Float) | (grad_handle, flow_out)
|
:: (MonadBuild m', TensorType dtype) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value dtype) | value |
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value dtype) | value |
:: (MonadBuild m', TensorType dtype) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 Float | flow_in |
-> m' (Tensor Value dtype) | value |
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 Float | flow_in |
-> m' (Tensor Value dtype) | value |
:: TensorType dtype | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 Float | flow_in |
-> Tensor Build dtype | value |
Deprecated. Use TensorArrayReadV3
:: TensorType dtype | |
=> OpParams | |
-> Tensor v'1 ByteString | handle |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 Float | flow_in |
-> Tensor Build dtype | value |
:: (MonadBuild m', TensorType dtype) | |
=> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value dtype) | value: The tensor that is read from the TensorArray. |
Read an element from the TensorArray into output value
.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value dtype) | value: The tensor that is read from the TensorArray. |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | indices |
-> Tensor v'3 t | value |
-> Tensor v'4 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | indices |
-> Tensor v'3 t | value |
-> Tensor v'4 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: TensorType t | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 Int32 | indices |
-> Tensor v'3 t | value |
-> Tensor v'4 Float | flow_in |
-> Tensor Build Float | flow_out |
Deprecated. Use TensorArrayScatterV3
:: (MonadBuild m', TensorType t) | |
=> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | indices: The locations at which to write the tensor elements. |
-> Tensor v'3 t | value: The concatenated tensor to write to the TensorArray. |
-> Tensor v'4 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Float) | flow_out: A float scalar that enforces proper chaining of operations. |
Scatter the data from the input value into specific TensorArray elements.
indices
must be a vector, its length must match the first dim of value
.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | indices: The locations at which to write the tensor elements. |
-> Tensor v'3 t | value: The concatenated tensor to write to the TensorArray. |
-> Tensor v'4 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Float) | flow_out: A float scalar that enforces proper chaining of operations. |
:: MonadBuild m' | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value Int32) | size |
:: MonadBuild m' | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 Float | flow_in |
-> m' (Tensor Value Int32) | size |
Deprecated. Use TensorArraySizeV3
:: MonadBuild m' | |
=> ResourceHandle | handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). |
-> Tensor v'2 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Int32) | size: The current size of the TensorArray. |
Get the current size of the TensorArray.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). |
-> Tensor v'2 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Int32) | size: The current size of the TensorArray. |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 t | value |
-> Tensor v'3 Int64 | lengths |
-> Tensor v'4 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 t | value |
-> Tensor v'3 Int64 | lengths |
-> Tensor v'4 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: TensorType t | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 t | value |
-> Tensor v'3 Int64 | lengths |
-> Tensor v'4 Float | flow_in |
-> Tensor Build Float | flow_out |
Deprecated. Use TensorArraySplitV3
:: (MonadBuild m', TensorType t) | |
=> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 t | value: The concatenated tensor to write to the TensorArray. |
-> Tensor v'3 Int64 | lengths: The vector of lengths, how to split the rows of value into the TensorArray. |
-> Tensor v'4 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Float) | flow_out: A float scalar that enforces proper chaining of operations. |
Split the data from the input value into TensorArray elements.
Assuming that lengths
takes on values
```(n0, n1, ..., n(T-1))```
and that value
has shape
```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
this splits values into a TensorArray with T tensors.
TensorArray index t will be the subtensor of values with starting position
```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
and having size
```nt x d0 x d1 x ...```
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 t | value: The concatenated tensor to write to the TensorArray. |
-> Tensor v'3 Int64 | lengths: The vector of lengths, how to split the rows of value into the TensorArray. |
-> Tensor v'4 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Float) | flow_out: A float scalar that enforces proper chaining of operations. |
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 t | value |
-> Tensor v'3 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 t | value |
-> Tensor v'3 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: MonadBuild m' | |
=> DataType | dtype |
-> Tensor v'1 Int32 | size |
-> m' (Tensor Value ByteString) | handle |
Deprecated. Use TensorArrayV3
:: MonadBuild m' | |
=> OpParams | |
-> DataType | dtype |
-> Tensor v'1 Int32 | size |
-> m' (Tensor Value ByteString) | handle |
:: MonadBuild m' | |
=> DataType | dtype: The type of the elements on the tensor_array. |
-> Tensor v'1 Int32 | size: The size of the array. |
-> m' (ResourceHandle, Tensor Value Float) | (handle, flow)
|
An array of Tensors of given size, with data written via Write and read
via Read or Pack.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | dtype: The type of the elements on the tensor_array. |
-> Tensor v'1 Int32 | size: The size of the array. |
-> m' (ResourceHandle, Tensor Value Float) | (handle, flow)
|
:: (MonadBuild m', TensorType t) | |
=> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 t | value |
-> Tensor v'4 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Tensor Ref ByteString | handle |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 t | value |
-> Tensor v'4 Float | flow_in |
-> m' (Tensor Value Float) | flow_out |
:: TensorType t | |
=> Tensor v'1 ByteString | handle |
-> Tensor v'2 Int32 | index |
-> Tensor v'3 t | value |
-> Tensor v'4 Float | flow_in |
-> Tensor Build Float | flow_out |
Deprecated. Use TensorArrayGradV3
:: (MonadBuild m', TensorType t) | |
=> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | index: The position to write to inside the TensorArray. |
-> Tensor v'3 t | value: The tensor to write to the TensorArray. |
-> Tensor v'4 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Float) | flow_out: A float scalar that enforces proper chaining of operations. |
Push an element onto the tensor_array.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> ResourceHandle | handle: The handle to a TensorArray. |
-> Tensor v'2 Int32 | index: The position to write to inside the TensorArray. |
-> Tensor v'3 t | value: The tensor to write to the TensorArray. |
-> Tensor v'4 Float | flow_in: A float scalar that enforces proper chaining of operations. |
-> m' (Tensor Value Float) | flow_out: A float scalar that enforces proper chaining of operations. |
:: TensorType t | |
=> Tensor v'1 t | tensor: A tensor to serialize. |
-> Tensor Build ByteString | summary |
Outputs a Summary
protocol buffer with a tensor.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | tensor: A tensor to serialize. |
-> Tensor Build ByteString | summary |
:: MonadBuild m' | |
=> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
A Reader that outputs the lines of a file delimited by '\n'.
:: MonadBuild m' | |
=> OpParams | |
-> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
:: MonadBuild m' | |
=> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
A Reader that outputs the lines of a file delimited by '\n'.
:: MonadBuild m' | |
=> OpParams | |
-> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
threadUnsafeUnigramCandidateSampler
:: Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Generates labels for candidate sampling with a learned unigram distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
threadUnsafeUnigramCandidateSampler'
:: OpParams | |
-> Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
:: (TensorType t, OneOf `[Int32, Int64]` tmultiples) | |
=> Tensor v'1 t | input: 1-D or higher. |
-> Tensor v'2 tmultiples | multiples: 1-D. Length must be the same as the number of dimensions in |
-> Tensor Build t | output |
Constructs a tensor by tiling a given tensor.
This operation creates a new tensor by replicating input
multiples
times.
The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
and the values of input
are replicated `multiples[i]` times along the i'th
dimension. For example, tiling `[a b c d]` by `[2]` produces
`[a b c d a b c d]`.
Returns the gradient of Tile
.
Since Tile
takes an input and repeats the input multiples
times
along each dimension, TileGrad
takes in multiples
and aggregates
each repeated tile of input
into output
.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Int64 | k: Number of top elements to look for along the last dimension (along each row for matrices). |
-> Tensor v'1 t | input: 1-D or higher with last dimension at least |
-> (Tensor Build t, Tensor Build Int32) | (values, indices)
|
Finds values and indices of the k
largest elements for the last dimension.
If the input is a vector (rank-1), finds the k
largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
j
-th largest entry in input
, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top k
entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
If k
varies dynamically, use TopKV2
below.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Int64 | k: Number of top elements to look for along the last dimension (along each row for matrices). |
-> Tensor v'1 t | input: 1-D or higher with last dimension at least |
-> (Tensor Build t, Tensor Build Int32) | (values, indices)
|
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | input: 1-D or higher with last dimension at least |
-> Tensor v'2 Int32 | k: 0-D. Number of top elements to look for along the last dimension (along each row for matrices). |
-> (Tensor Build t, Tensor Build Int32) | (values, indices)
|
Finds values and indices of the k
largest elements for the last dimension.
If the input is a vector (rank-1), finds the k
largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
j
-th largest entry in input
, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top k
entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> OpParams | |
-> Tensor v'1 t | input: 1-D or higher with last dimension at least |
-> Tensor v'2 Int32 | k: 0-D. Number of top elements to look for along the last dimension (along each row for matrices). |
-> (Tensor Build t, Tensor Build Int32) | (values, indices)
|
:: (TensorType t, OneOf `[Int32, Int64]` tperm) | |
=> Tensor v'1 t | x |
-> Tensor v'2 tperm | perm |
-> Tensor Build t | y |
Shuffle dimensions of x according to a permutation.
The output y
has the same rank as x
. The shapes of x
and y
satisfy:
`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t | |
=> Tensor v'1 t | x |
-> Tensor v'2 t | y |
-> Tensor Build t | z |
Returns x / y element-wise for integer types.
Truncation designates that negative numbers will round fractional quantities
toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
than Python semantics. See FloorDiv
for a division function that matches
Python Semantics.
*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting here
Returns element-wise remainder of division. When `x &lt; 0` xor `y &lt; 0` is
true, this follows C semantics in that the result is consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
*NOTE*: `Mod` supports broadcasting. More about broadcasting here
:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) | |
=> Tensor v'1 t | shape: The shape of the output tensor. |
-> m' (Tensor Value dtype) | output: A tensor of the specified shape filled with random truncated normal values. |
Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
:: Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
Generates labels for candidate sampling with a uniform distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
:: OpParams | |
-> Int64 | num_sampled: Number of candidates to randomly sample per batch. |
-> Int64 | num_true: Number of true labels per context. |
-> Int64 | range_max: The sampler will sample integers from the interval [0, range_max). |
-> Bool | unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities. |
-> Tensor v'1 Int64 | true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label. |
-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) | (sampled_candidates, true_expected_count, sampled_expected_count)
|
:: (TensorType t, OneOf `[Int32, Int64]` out_idx) | |
=> Tensor v'1 t | x: 1-D. |
-> (Tensor Build t, Tensor Build out_idx) | (y, idx)
|
Finds unique elements in a 1-D tensor.
This operation returns a tensor y
containing all of the unique elements of x
sorted in the same order that they occur in x
. This operation also returns a
tensor idx
the same size as x
that contains the index of each value of x
in the unique output y
. In other words:
`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
For example:
```prettyprint
# tensor x
is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx = unique(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```
:: (TensorType t, OneOf `[Int32, Int64]` out_idx) | |
=> Tensor v'1 t | x: 1-D. |
-> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) | (y, idx, count)
|
Finds unique elements in a 1-D tensor.
This operation returns a tensor y
containing all of the unique elements of x
sorted in the same order that they occur in x
. This operation also returns a
tensor idx
the same size as x
that contains the index of each value of x
in the unique output y
. Finally, it returns a third tensor count
that
contains the count of each element of y
in x
. In other words:
`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
For example:
```prettyprint
# tensor x
is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx, count = unique_with_counts(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
```
:: TensorType t | |
=> Int64 | num |
-> Tensor v'1 t | value: 1-D or higher, with |
-> [Tensor Build t] | output: The list of tensors unpacked from |
Unpacks a given dimension of a rank-R
tensor into num
rank-`(R-1)` tensors.
Unpacks num
tensors from value
by chipping it along the axis
dimension.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in output
is the slice `value[i, :, :, :]`
and each tensor in output
will have shape `(B, C, D)`. (Note that the
dimension unpacked along is gone, unlike split
).
If `axis == 1` then the i'th tensor in output
is the slice `value[:, i, :, :]`
and each tensor in output
will have shape `(A, C, D)`.
Etc.
This is the opposite of pack
.
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A tensor whose shape is a prefix of `data.shape`. |
-> Tensor v'3 Int32 | num_segments |
-> Tensor Build t | output: Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
|
Computes the sum along segments of a tensor.
Read the section on Segmentation for an explanation of segments.
Computes a tensor such that
`(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
that `segment_ids[j...] == i`. Unlike SegmentSum
, segment_ids
need not be sorted and need not cover all values in the full
range of valid values.
If the sum is empty for a given segment ID i
, `output[i] = 0`.
num_segments
should equal the number of distinct segment IDs.
(Illustration: ../../images/UnsortedSegmentSum.png)
:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) | |
=> OpParams | |
-> Tensor v'1 t | data |
-> Tensor v'2 tindices | segment_ids: A tensor whose shape is a prefix of `data.shape`. |
-> Tensor v'3 Int32 | num_segments |
-> Tensor Build t | output: Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
|
:: (MonadBuild m', TensorTypes dtypes) | |
=> m' (TensorList Value dtypes) | values |
Op is similar to a lightweight Dequeue. The basic functionality is similar to
dequeue with many fewer capabilities and options. This Op is optimized for performance.
:: (MonadBuild m', TensorTypes dtypes) | |
=> OpParams | |
-> m' (TensorList Value dtypes) | values |
:: MonadBuild m' | |
=> DataType | dtype: the type of this variable. Must agree with the dtypes of all ops using this variable. |
-> Shape | shape: The (possibly partially specified) shape of this variable. |
-> m' ResourceHandle | resource |
Creates a handle to a Variable resource.
:: MonadBuild m' | |
=> OpParams | |
-> DataType | dtype: the type of this variable. Must agree with the dtypes of all ops using this variable. |
-> Shape | shape: The (possibly partially specified) shape of this variable. |
-> m' ResourceHandle | resource |
:: MonadBuild m' | |
=> ResourceHandle | resource: the input resource handle. |
-> m' (Tensor Value Bool) | is_initialized: a scalar boolean which is true if the variable has been initialized. |
Checks whether a resource handle-based variable has been initialized.
:: MonadBuild m' | |
=> OpParams | |
-> ResourceHandle | resource: the input resource handle. |
-> m' (Tensor Value Bool) | is_initialized: a scalar boolean which is true if the variable has been initialized. |
:: (MonadBuild m', TensorType dtype) | |
=> Shape | shape |
-> m' (Tensor Ref dtype) | ref |
Use VariableV2 instead.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Shape | shape |
-> m' (Tensor Ref dtype) | ref |
:: (MonadBuild m', TensorType dtype) | |
=> Shape | shape: The shape of the variable tensor. |
-> m' (Tensor Ref dtype) | ref: A reference to the variable tensor. |
Holds state in the form of a tensor that persists across steps.
Outputs a ref to the tensor state so it may be read or modified. TODO(zhifengc/mrry): Adds a pointer to a more detail document about sharing states in tensorflow.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Shape | shape: The shape of the variable tensor. |
-> m' (Tensor Ref dtype) | ref: A reference to the variable tensor. |
Returns locations of true values in a boolean tensor.
This operation returns the coordinates of true elements in input
. The
coordinates are returned in a 2-D tensor where the first dimension (rows)
represents the number of true elements, and the second dimension (columns)
represents the coordinates of the true elements. Keep in mind, the shape of
the output tensor can vary depending on how many true values there are in
input
. Indices are output in row-major order.
For example:
```prettyprint
# input
tensor is [[True, False]
# [True, False]]
# input
has two true values, so output has two coordinates.
# input
has rank of 2, so coordinates have two indices.
where(input) ==> [[0, 0],
[1, 0]]
# input
tensor is [[[True, False]
# [True, False]]
# [[False, True]
# [False, True]]
# [[False, False]
# [False, True]]]
# input
has 5 true values, so output has 5 coordinates.
# input
has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
[0, 1, 0],
[1, 0, 1],
[1, 1, 1],
[2, 1, 1]]
```
:: MonadBuild m' | |
=> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
A Reader that outputs the entire contents of a file as a value.
To use, enqueue filenames in a Queue. The output of ReaderRead will be a filename (key) and the contents of that file (value).
:: MonadBuild m' | |
=> OpParams | |
-> m' (Tensor Ref ByteString) | reader_handle: The handle to reference the Reader. |
:: MonadBuild m' | |
=> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
A Reader that outputs the entire contents of a file as a value.
To use, enqueue filenames in a Queue. The output of ReaderRead will be a filename (key) and the contents of that file (value).
:: MonadBuild m' | |
=> OpParams | |
-> m' ResourceHandle | reader_handle: The handle to reference the Reader. |
:: MonadBuild m' | |
=> Tensor v'1 ByteString | filename: scalar. The name of the file to which we write the contents. |
-> Tensor v'2 ByteString | contents: scalar. The content to be written to the output file. |
-> m' ControlNode |
Writes contents to the file at input filename. Creates file if not existing.
:: MonadBuild m' | |
=> OpParams | |
-> Tensor v'1 ByteString | filename: scalar. The name of the file to which we write the contents. |
-> Tensor v'2 ByteString | contents: scalar. The content to be written to the output file. |
-> m' ControlNode |
:: TensorType t | |
=> Tensor v'1 t | x: a tensor of type T. |
-> Tensor Build t | y: a tensor of the same shape and type as x but filled with zeros. |
Returns a tensor of zeros with the same shape and type as x.
:: TensorType t | |
=> OpParams | |
-> Tensor v'1 t | x: a tensor of type T. |
-> Tensor Build t | y: a tensor of the same shape and type as x but filled with zeros. |
Compute the Hurwitz zeta function \(\zeta(x, q)\).
The Hurwitz zeta function is defined as:
\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\)
:: (MonadBuild m', TensorType t) | |
=> Int64 | index: This argument is the index-th argument of the function. |
-> m' (Tensor Value t) | output: The argument. |
A graph node which represents an argument to a function.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Int64 | index: This argument is the index-th argument of the function. |
-> m' (Tensor Value t) | output: The argument. |
:: (TensorType t, TensorTypes out_types) | |
=> [Tensor v'1 t] | input |
-> TensorList Build out_types | output |
Converts an array of tensors to a list of tensors.
:: (TensorType t, TensorTypes out_types) | |
=> OpParams | |
-> [Tensor v'1 t] | input |
-> TensorList Build out_types | output |
:: (TensorType srcT, TensorType dstT) | |
=> Tensor v'1 srcT | x |
-> Tensor Build dstT | y |
Cast x of type SrcT to y of DstT.
_HostCast requires its input and produces its output in host memory.
:: (TensorType srcT, TensorType dstT) | |
=> OpParams | |
-> Tensor v'1 srcT | x |
-> Tensor Build dstT | y |
:: (MonadBuild m', TensorType tensor_type) | |
=> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> m' (Tensor Value tensor_type) | tensor: The tensor to receive. |
Receives the named tensor from send_device on recv_device.
_HostRecv requires its input on host memory whereas _Recv requires its input on device memory.
:: (MonadBuild m', TensorType tensor_type) | |
=> OpParams | |
-> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> m' (Tensor Value tensor_type) | tensor: The tensor to receive. |
:: (MonadBuild m', TensorType t) | |
=> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> Tensor v'1 t | tensor: The tensor to send. |
-> m' ControlNode |
Sends the named tensor from send_device to recv_device.
_HostSend requires its input on host memory whereas _Send requires its input on device memory.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> Tensor v'1 t | tensor: The tensor to send. |
-> m' ControlNode |
:: (TensorTypes tin, TensorType t) | |
=> Int64 | N |
-> TensorList v'1 tin | input |
-> [Tensor Build t] | output |
Converts a list of tensors to an array of tensors.
:: (TensorTypes tin, TensorType t) | |
=> OpParams | |
-> Int64 | N |
-> TensorList v'1 tin | input |
-> [Tensor Build t] | output |
:: (MonadBuild m', TensorType dtype) | |
=> Shape | shape: 1-D |
-> m' (Tensor Value dtype) | output: An empty Tensor of the specified type. |
Creates an empty Tensor with shape shape
and type dtype
.
The memory can optionally be initialized. This is usually useful in conjunction with inplace operations.
:: (MonadBuild m', TensorType dtype) | |
=> OpParams | |
-> Shape | shape: 1-D |
-> m' (Tensor Value dtype) | output: An empty Tensor of the specified type. |
:: TensorType t | |
=> Int64 | loc: A scalar indicating the index of the first dimension such that value[loc, :] is updated. |
-> Tensor v'1 t | value: A |
-> Tensor v'2 t | update: A |
-> Tensor Build t | output: |
Updates input value
at loc
with update
.
If you use this function you will almost certainly want to add a control dependency as done in the implementation of parallel_stack to avoid race conditions.
:: TensorType t | |
=> OpParams | |
-> Int64 | loc: A scalar indicating the index of the first dimension such that value[loc, :] is updated. |
-> Tensor v'1 t | value: A |
-> Tensor v'2 t | update: A |
-> Tensor Build t | output: |
:: (MonadBuild m', TensorType tensor_type) | |
=> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> m' (Tensor Value tensor_type) | tensor: The tensor to receive. |
Receives the named tensor from send_device on recv_device.
:: (MonadBuild m', TensorType tensor_type) | |
=> OpParams | |
-> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> m' (Tensor Value tensor_type) | tensor: The tensor to receive. |
:: (MonadBuild m', TensorType t) | |
=> Int64 | index: This return value is the index-th return value of the function. |
-> Tensor v'1 t | input: The return value. |
-> m' ControlNode |
A graph node which represents a return value of a function.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Int64 | index: This return value is the index-th return value of the function. |
-> Tensor v'1 t | input: The return value. |
-> m' ControlNode |
:: (MonadBuild m', TensorType t) | |
=> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> Tensor v'1 t | tensor: The tensor to send. |
-> m' ControlNode |
Sends the named tensor from send_device to recv_device.
:: (MonadBuild m', TensorType t) | |
=> OpParams | |
-> Int64 | send_device_incarnation: The current incarnation of send_device. |
-> Tensor v'1 t | tensor: The tensor to send. |
-> m' ControlNode |