diff --git a/docs/haddock/doc-index-124.html b/docs/haddock/doc-index-124.html
new file mode 100644
index 0000000..f837e6d
--- /dev/null
+++ b/docs/haddock/doc-index-124.html
@@ -0,0 +1,4 @@
(Index - |)

 

Index - |

|:|TensorFlow.Types
\ No newline at end of file
diff --git a/docs/haddock/doc-index-47.html b/docs/haddock/doc-index-47.html
index 13ac4bd..58ca838 100644
--- a/docs/haddock/doc-index-47.html
+++ b/docs/haddock/doc-index-47.html
@@ -1,4 +1,4 @@
(Index - /)

 

Index - /

/=TensorFlow.Types, TensorFlow.Core
\ No newline at end of file

 

Index - /

/:/TensorFlow.Types
/=TensorFlow.Types, TensorFlow.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-58.html b/docs/haddock/doc-index-58.html
new file mode 100644
index 0000000..00e243b
--- /dev/null
+++ b/docs/haddock/doc-index-58.html
@@ -0,0 +1,4 @@
(Index - :)

 

Index - :

:/TensorFlow.Types
\ No newline at end of file
diff --git a/docs/haddock/doc-index-92.html b/docs/haddock/doc-index-92.html
index d5185e5..249213d 100644
--- a/docs/haddock/doc-index-92.html
+++ b/docs/haddock/doc-index-92.html
@@ -1,4 +1,4 @@
(Index - \)

 

Index - \

\\TensorFlow.Types
\ No newline at end of file

 

Index - \

\\TensorFlow.Types
\ No newline at end of file
diff --git a/docs/haddock/doc-index-95.html b/docs/haddock/doc-index-95.html
index 7a1c25d..3ec6280 100644
--- a/docs/haddock/doc-index-95.html
+++ b/docs/haddock/doc-index-95.html
@@ -1,4 +1,4 @@
(Index - _)

 

Index - _

_ArgTensorFlow.GenOps.Core
_AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HostCastTensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_RecvTensorFlow.GenOps.Core
_ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SendTensorFlow.GenOps.Core
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file

 

Index - _

_ArgTensorFlow.GenOps.Core
_Arg'TensorFlow.GenOps.Core
_ArrayToListTensorFlow.GenOps.Core
_ArrayToList'TensorFlow.GenOps.Core
_AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_Event'fileVersionProto.Tensorflow.Core.Util.Event
_Event'graphDefProto.Tensorflow.Core.Util.Event
_Event'logMessageProto.Tensorflow.Core.Util.Event
_Event'metaGraphDefProto.Tensorflow.Core.Util.Event
_Event'sessionLogProto.Tensorflow.Core.Util.Event
_Event'stepProto.Tensorflow.Core.Util.Event
_Event'summaryProto.Tensorflow.Core.Util.Event
_Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
_Event'wallTimeProto.Tensorflow.Core.Util.Event
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
_HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
_HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
_HistogramProto'minProto.Tensorflow.Core.Framework.Summary
_HistogramProto'numProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
_HostCastTensorFlow.GenOps.Core
_HostCast'TensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostRecv'TensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_HostSend'TensorFlow.GenOps.Core
_ListToArrayTensorFlow.GenOps.Core
_ListToArray'TensorFlow.GenOps.Core
_LogMessage'levelProto.Tensorflow.Core.Util.Event
_LogMessage'messageProto.Tensorflow.Core.Util.Event
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_ParallelConcatStartTensorFlow.GenOps.Core
_ParallelConcatStart'TensorFlow.GenOps.Core
_ParallelConcatUpdateTensorFlow.GenOps.Core
_ParallelConcatUpdate'TensorFlow.GenOps.Core
_RecvTensorFlow.GenOps.Core
_Recv'TensorFlow.GenOps.Core
_ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_Retval'TensorFlow.GenOps.Core
_RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SendTensorFlow.GenOps.Core
_Send'TensorFlow.GenOps.Core
_SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
_SessionLog'msgProto.Tensorflow.Core.Util.Event
_SessionLog'statusProto.Tensorflow.Core.Util.Event
_Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
_Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
_Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
_Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
_Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
_Summary'valueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
_Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
_Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
_Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
_Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
_Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
_SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
_TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
_TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file
diff --git a/docs/haddock/doc-index-A.html b/docs/haddock/doc-index-A.html
index 2f66c54..c983946 100644
--- a/docs/haddock/doc-index-A.html
+++ b/docs/haddock/doc-index-A.html
@@ -1,4 +1,4 @@
(Index - A)

 

Index - A

abortTensorFlow.GenOps.Core
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addManySparseToTensorsMapTensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSummaryTensorFlow.Build
adjustContrastTensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValuesProto.Tensorflow.Core.Framework.OpDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
AllTensorTypesTensorFlow.Types
anyTensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
ArgKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
ArgResourceTensorFlow.OpGen.ParsedOp
ArgTensorEitherTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assignAddTensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
audioSummaryTensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
\ No newline at end of file

 

Index - A

abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
abs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
add' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addN' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
addSummaryTensorFlow.Tensor
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValuesProto.Tensorflow.Core.Framework.OpDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
AllTensorTypesTensorFlow.Types
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
argCaseKindTensorFlow.OpGen.ParsedOp
ArgKindTensorFlow.OpGen.ParsedOp
argKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
ArgSomeTensorTensorFlow.OpGen.ParsedOp
ArgTensorBuildTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assignAddTensorFlow.GenOps.Core
assignAdd'TensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
audioProto.Tensorflow.Core.Framework.Summary
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-All.html b/docs/haddock/doc-index-All.html
index 26eb83b..b8a8c45 100644
--- a/docs/haddock/doc-index-All.html
+++ b/docs/haddock/doc-index-All.html
@@ -1,4 +1,4 @@
(Index)

 

Index

/=TensorFlow.Types, TensorFlow.Core
abortTensorFlow.GenOps.Core
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addManySparseToTensorsMapTensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSummaryTensorFlow.Build
adjustContrastTensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValuesProto.Tensorflow.Core.Framework.OpDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
AllTensorTypesTensorFlow.Types
anyTensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
ArgKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
ArgResourceTensorFlow.OpGen.ParsedOp
ArgTensorEitherTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assignAddTensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
audioSummaryTensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
bProto.Tensorflow.Core.Framework.AttrValue
barrierTensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bitcastTensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Session, TensorFlow.Core
buildAndTensorFlow.Session, TensorFlow.Core
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
buildListOpTensorFlow.BuildOp
BuildOpTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
buildWithSummaryTensorFlow.Session, TensorFlow.Core
camelCaseTensorFlow.OpGen.ParsedOp
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ceilTensorFlow.GenOps.Core
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
choleskyTensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
collectAllSummariesTensorFlow.Build
colocateWithTensorFlow.Build, TensorFlow.Core
complexTensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatOffsetTensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
constantTensorFlow.Ops
containerProto.Tensorflow.Core.Framework.ResourceHandle
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
countUpToTensorFlow.GenOps.Core
createVariableOpTensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
DataTypeProto.Tensorflow.Core.Framework.Types
dcomplexValProto.Tensorflow.Core.Framework.Tensor
debugIdentityTensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugOpsProto.Tensorflow.Core.Protobuf.Config
DebugTensorWatch 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
debugUrlsProto.Tensorflow.Core.Protobuf.Config
decodeBase64TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
defaultValueProto.Tensorflow.Core.Framework.OpDef
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceNameTensorFlow.Output, TensorFlow.Core
diagTensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
divTensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
drawBoundingBoxesTensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtypeProto.Tensorflow.Core.Framework.Tensor
DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
DT_BOOLProto.Tensorflow.Core.Framework.Types
DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
DT_DOUBLEProto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
DT_FLOATProto.Tensorflow.Core.Framework.Types
DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
DT_HALFProto.Tensorflow.Core.Framework.Types
DT_HALF_REFProto.Tensorflow.Core.Framework.Types
DT_INT16Proto.Tensorflow.Core.Framework.Types
DT_INT16_REFProto.Tensorflow.Core.Framework.Types
DT_INT32Proto.Tensorflow.Core.Framework.Types
DT_INT32_REFProto.Tensorflow.Core.Framework.Types
DT_INT64Proto.Tensorflow.Core.Framework.Types
DT_INT64_REFProto.Tensorflow.Core.Framework.Types
DT_INT8Proto.Tensorflow.Core.Framework.Types
DT_INT8_REFProto.Tensorflow.Core.Framework.Types
DT_INVALIDProto.Tensorflow.Core.Framework.Types
DT_QINT16Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
DT_QINT32Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
DT_QINT8Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
DT_QUINT16Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
DT_QUINT8Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
DT_RESOURCEProto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
DT_STRINGProto.Tensorflow.Core.Framework.Types
DT_STRING_REFProto.Tensorflow.Core.Framework.Types
DT_UINT16Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
DT_UINT8Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
editDistanceTensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeJpegTensorFlow.GenOps.Core
encodePngTensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
enqueueTensorFlow.Queue
enterTensorFlow.GenOps.Core
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extractGlimpseTensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorListTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
funcProto.Tensorflow.Core.Framework.AttrValue
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
gatherTensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getSessionHandleTensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getVarIntTensorFlow.Internal.VarInt
googleTestGoogle.Test
GPUOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
gpuOptionsProto.Tensorflow.Core.Protobuf.Config
gradientsTensorFlow.Gradient
GraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
GraphOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
graphOptionsProto.Tensorflow.Core.Protobuf.Config
GraphStateTensorFlow.Build
greaterTensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
groupTensorFlow.ControlFlow, TensorFlow.Core
halfValProto.Tensorflow.Core.Framework.Tensor
hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
HaskellName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
haskellNameTensorFlow.OpGen.ParsedOp
hasMinimumProto.Tensorflow.Core.Framework.OpDef
histogramSummaryTensorFlow.GenOps.Core
hoistBuildTTensorFlow.Build
hSVToRGBTensorFlow.GenOps.Core
iProto.Tensorflow.Core.Framework.AttrValue
identity 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
identityReaderTensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imageSummaryTensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedVariableTensorFlow.Ops
initializeTableTensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
int64ValProto.Tensorflow.Core.Framework.Tensor
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFiniteTensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
key 
1 (Function)Proto.Tensorflow.Core.Protobuf.Config
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
l2LossTensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
lessTensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lgammaTensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linSpaceTensorFlow.GenOps.Core
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
logTensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logicalAndTensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logSoftmaxTensorFlow.GenOps.Core
logUniformCandidateSamplerTensorFlow.GenOps.Core
lookupTableExportTensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
makeQueue2TensorFlow.Queue
matchingFilesTensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
maxTensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'tensorProto.Tensorflow.Core.Framework.AttrValue
maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
maybe'versionsProto.Tensorflow.Core.Framework.Graph
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mergeTensorFlow.GenOps.Core
mergeSummaryTensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
minTensorFlow.GenOps.Core
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
mirrorPadTensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
namedTensorFlow.ControlFlow, TensorFlow.Core
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nodeProto.Tensorflow.Core.Framework.Graph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeNameProto.Tensorflow.Core.Protobuf.Config
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
numberAttrProto.Tensorflow.Core.Framework.OpDef
numThreadsProto.Tensorflow.Core.Protobuf.Config
oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OpTensorFlow.Output
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
opAttrTensorFlow.Output, TensorFlow.Build
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build
OpResultTensorFlow.BuildOp
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
opUnrenderedTensorFlow.Output
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputTensorFlow.Output
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputOpTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
outputSlotProto.Tensorflow.Core.Protobuf.Config
pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgKindTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseOpTensorFlow.OpGen.ParsedOp
parseTensorTensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
PendingNodeNameTensorFlow.Output
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholderV2TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
prefixTensorFlow.OpGen
priorityQueueTensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
protoShapeTensorFlow.Types
putVarIntTensorFlow.Internal.VarInt
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
Queue2TensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
randomCropTensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rankTensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readVariableOpTensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
RefTensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
RefKindTensorFlow.Tensor, TensorFlow.Core
refMergeTensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
renderTensorFlow.Build, TensorFlow.Core
RenderedTensorFlow.Output
renderedNodeDefsTensorFlow.Build
renderNodeNameTensorFlow.Build
renderOutputTensorFlow.Build
requantizationRangeTensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
ResourceHandle 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
4 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restoreFromNameTensorFlow.Ops
restoreSliceTensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
rsqrtTensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
saveTensorFlow.Ops
Scalar 
1 (Data Constructor)TensorFlow.Nodes, TensorFlow.Core
2 (Type/Class)TensorFlow.Nodes, TensorFlow.Core
scalarTensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummaryTensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
shapeNTensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
SimpleArgTensorFlow.OpGen.ParsedOp
sinTensorFlow.GenOps.Core
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.TensorShape
sliceTensorFlow.GenOps.Core
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
spaceToBatchTensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
splitTensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summaryProto.Tensorflow.Core.Framework.OpDef
SummaryTensorTensorFlow.Build
svdTensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensorProto.Tensorflow.Core.Framework.AttrValue
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorAttrTensorFlow.Tensor, TensorFlow.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor, TensorFlow.Core
tensorKindTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefTypeTensorFlow.Types
tensorShapeProto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSummaryTensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypesTensorFlow.Types
tensorValTensorFlow.Types
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
textLineReaderTensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordReaderTensorFlow.GenOps.Core
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
topKTensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeListAttrProto.Tensorflow.Core.Framework.OpDef
unControlNodeTensorFlow.Output, TensorFlow.Build
unHaskellNameTensorFlow.OpGen.ParsedOp
uniformCandidateSamplerTensorFlow.GenOps.Core
UniqueTensorFlow.Build
uniqueTensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
unknownRankProto.Tensorflow.Core.Framework.TensorShape
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unpackTensorFlow.GenOps.Core
UnrenderedTensorFlow.Output
unScalarTensorFlow.Nodes, TensorFlow.Core
unsortedSegmentSumTensorFlow.GenOps.Core
unTensorDataTensorFlow.Types
unTFNameTensorFlow.OpGen.ParsedOp
usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
ValueTensorFlow.Tensor, TensorFlow.Core
value 
1 (Function)TensorFlow.Tensor, TensorFlow.Core
2 (Function)Proto.Tensorflow.Core.Protobuf.Config
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
ValueKindTensorFlow.Tensor, TensorFlow.Core
varHandleOpTensorFlow.GenOps.Core
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
varIsInitializedOpTensorFlow.GenOps.Core
vectorTensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
versionNumberProto.Tensorflow.Core.Framework.Tensor
versionsProto.Tensorflow.Core.Framework.Graph
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
where'TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
writeFileTensorFlow.GenOps.Core
wtsCkptTensorFlow.Examples.MNIST.TrainedGraph
zeroInitializedVariableTensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
\\TensorFlow.Types
_ArgTensorFlow.GenOps.Core
_AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Config
_DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HostCastTensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_RecvTensorFlow.GenOps.Core
_ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SendTensorFlow.GenOps.Core
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
Index

/:/TensorFlow.Types
/=TensorFlow.Types, TensorFlow.Core
:/TensorFlow.Types
abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
abs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
abs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
add 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
add' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addN 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addN' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
addNewOpTensorFlow.Build
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
addSummaryTensorFlow.Tensor
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
allocatorTypeProto.Tensorflow.Core.Protobuf.Config
allowedValuesProto.Tensorflow.Core.Framework.OpDef
allowGrowthProto.Tensorflow.Core.Protobuf.Config
allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
AllTensorTypesTensorFlow.Types
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
argCaseKindTensorFlow.OpGen.ParsedOp
ArgKindTensorFlow.OpGen.ParsedOp
argKindTensorFlow.OpGen.ParsedOp
argLengthTensorFlow.OpGen.ParsedOp
argMax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
ArgSomeTensorTensorFlow.OpGen.ParsedOp
ArgTensorBuildTensorFlow.OpGen.ParsedOp
ArgTensorRefTensorFlow.OpGen.ParsedOp
ArgTensorValueTensorFlow.OpGen.ParsedOp
ArgTypeTensorFlow.OpGen.ParsedOp
argTypeTensorFlow.OpGen.ParsedOp
ArgTypeAttrTensorFlow.OpGen.ParsedOp
argTypeAttrTensorFlow.OpGen.ParsedOp
ArgTypeFixedTensorFlow.OpGen.ParsedOp
asGraphDefTensorFlow.Build, TensorFlow.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assertAllCloseTensorFlow.Test
assign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
assignAddTensorFlow.GenOps.Core
assignAdd'TensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
Attr 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
attr 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
AttrBaseTypeTensorFlow.OpGen.ParsedOp
AttrBoolTensorFlow.OpGen.ParsedOp
AttrBytesTensorFlow.OpGen.ParsedOp
attrDescriptionTensorFlow.OpGen.ParsedOp
AttrFloatTensorFlow.OpGen.ParsedOp
AttributeTensorFlow.Types
attrInfoTensorFlow.OpGen.ParsedOp
AttrInt64TensorFlow.OpGen.ParsedOp
attrLensTensorFlow.Types
AttrListTensorFlow.OpGen.ParsedOp
attrNameTensorFlow.OpGen.ParsedOp
AttrShapeTensorFlow.OpGen.ParsedOp
AttrSingleTensorFlow.OpGen.ParsedOp
AttrTensorTensorFlow.OpGen.ParsedOp
AttrType 
1 (Type/Class)TensorFlow.OpGen.ParsedOp
2 (Data Constructor)TensorFlow.OpGen.ParsedOp
AttrValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
AttrValue'ListValue 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
audioProto.Tensorflow.Core.Framework.Summary
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
bProto.Tensorflow.Core.Framework.AttrValue
barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
broadcastGradientArgs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
bucketProto.Tensorflow.Core.Framework.Summary
bucketLimitProto.Tensorflow.Core.Framework.Summary
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
camelCaseTensorFlow.OpGen.ParsedOp
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
cast' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
checkpointPathProto.Tensorflow.Core.Util.Event
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
collectAllSummariesTensorFlow.Tensor
colocateWithTensorFlow.Tensor, TensorFlow.Core
colorspaceProto.Tensorflow.Core.Framework.Summary
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concat' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
constantTensorFlow.Ops
constant'TensorFlow.Ops
containerProto.Tensorflow.Core.Framework.ResourceHandle
contentTypeProto.Tensorflow.Core.Framework.Summary
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copy'TensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
copyHost'TensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
DataType 
1 (Type/Class)TensorFlow.Types
2 (Type/Class)Proto.Tensorflow.Core.Framework.Types
dcomplexValProto.Tensorflow.Core.Framework.Tensor
debugIdentityTensorFlow.GenOps.Core
debugIdentity'TensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugNanCount'TensorFlow.GenOps.Core
debugNumericSummaryTensorFlow.GenOps.Core
debugNumericSummary'TensorFlow.GenOps.Core
debugOptionsProto.Tensorflow.Core.Protobuf.Config
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
decodeTFRecordsTensorFlow.Records.Conduit
defaultValueProto.Tensorflow.Core.Framework.OpDef
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceNameTensorFlow.Output, TensorFlow.Core
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtypeProto.Tensorflow.Core.Framework.Tensor
DT_BFLOAT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INVALID 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
editDistanceTensorFlow.GenOps.Core
editDistance'TensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
elu'TensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
eluGrad'TensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeBase64'TensorFlow.GenOps.Core
encodedAudioStringProto.Tensorflow.Core.Framework.Summary
encodedImageStringProto.Tensorflow.Core.Framework.Summary
encodeJpegTensorFlow.GenOps.Core
encodeJpeg'TensorFlow.GenOps.Core
encodeOutputTensorFlow.Build
encodePngTensorFlow.GenOps.Core
encodePng'TensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
encodeTFRecordsTensorFlow.Records.Conduit
enqueueTensorFlow.Queue
enterTensorFlow.GenOps.Core
enter'TensorFlow.GenOps.Core
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
equal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erf'TensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
erfc'TensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
Event 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
EventWriterTensorFlow.Logging
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
exit'TensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
exp'TensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
expandDims' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
expm1TensorFlow.GenOps.Core
expm1'TensorFlow.GenOps.Core
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extractGlimpseTensorFlow.GenOps.Core
extractGlimpse'TensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
extractImagePatches'TensorFlow.GenOps.Core
fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
fileVersionProto.Tensorflow.Core.Util.Event
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fill' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
funcProto.Tensorflow.Core.Framework.AttrValue
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
gatherTensorFlow.GenOps.Core
gather'TensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
gatherNd'TensorFlow.GenOps.Core
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getSessionHandleTensorFlow.GenOps.Core
getSessionHandle'TensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getSessionTensor'TensorFlow.GenOps.Core
getTFRecordTensorFlow.Records
getTFRecordDataTensorFlow.Records
getTFRecordLengthTensorFlow.Records
getTFRecordsTensorFlow.Records
getVarIntTensorFlow.Internal.VarInt
globalJitLevelProto.Tensorflow.Core.Protobuf.Config
GPUOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
gpuOptionsProto.Tensorflow.Core.Protobuf.Config
gradientsTensorFlow.Gradient
GraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
graphDefProto.Tensorflow.Core.Util.Event
GraphOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
graphOptionsProto.Tensorflow.Core.Protobuf.Config
GraphStateTensorFlow.Build
greaterTensorFlow.GenOps.Core
greater'TensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
greaterEqual'TensorFlow.GenOps.Core
groupTensorFlow.ControlFlow, TensorFlow.Core
halfValProto.Tensorflow.Core.Framework.Tensor
hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
hashTableTensorFlow.GenOps.Core
hashTable'TensorFlow.GenOps.Core
HaskellName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
haskellNameTensorFlow.OpGen.ParsedOp
hasMinimumProto.Tensorflow.Core.Framework.OpDef
heightProto.Tensorflow.Core.Framework.Summary
histoProto.Tensorflow.Core.Framework.Summary
HistogramProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
histogramSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
histogramSummary'TensorFlow.GenOps.Core
hoistBuildTTensorFlow.Build
hSVToRGBTensorFlow.GenOps.Core
hSVToRGB'TensorFlow.GenOps.Core
iProto.Tensorflow.Core.Framework.AttrValue
identity 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identity' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identityReaderTensorFlow.GenOps.Core
identityReader'TensorFlow.GenOps.Core
identityReaderV2TensorFlow.GenOps.Core
identityReaderV2'TensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT'TensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT2D'TensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
iFFT3D'TensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igamma'TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
igammac'TensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imag'TensorFlow.GenOps.Core
imageProto.Tensorflow.Core.Framework.Summary
imageSummaryTensorFlow.GenOps.Core
imageSummary'TensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
immutableConst'TensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedVariableTensorFlow.Ops
initializedVariable'TensorFlow.Ops
initializeTableTensorFlow.GenOps.Core
initializeTable'TensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
initializeTableFromTextFile'TensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
int64ValProto.Tensorflow.Core.Framework.Tensor
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
inTopK'TensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
inv'TensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invertPermutation'TensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
invGrad'TensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFiniteTensorFlow.GenOps.Core
isFinite'TensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isInf'TensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isNan'TensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
isVariableInitialized'TensorFlow.GenOps.Core
key 
1 (Function)Proto.Tensorflow.Core.Protobuf.Config
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
l2LossTensorFlow.GenOps.Core
l2Loss'TensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
lengthFramesProto.Tensorflow.Core.Framework.Summary
lessTensorFlow.GenOps.Core
less'TensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lessEqual'TensorFlow.GenOps.Core
levelProto.Tensorflow.Core.Util.Event
lgammaTensorFlow.GenOps.Core
lgamma'TensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linSpaceTensorFlow.GenOps.Core
linSpace'TensorFlow.GenOps.Core
ListTensorFlow.Types
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
listDiff'TensorFlow.GenOps.Core
ListOfTensorFlow.Types
logTensorFlow.GenOps.Core
log'TensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
log1p'TensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logEventTensorFlow.Logging
logicalAndTensorFlow.GenOps.Core
logicalAnd'TensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalNot'TensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logicalOr'TensorFlow.GenOps.Core
LogMessage 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
logMessageProto.Tensorflow.Core.Util.Event
LogMessage'DEBUGProto.Tensorflow.Core.Util.Event
LogMessage'ERRORProto.Tensorflow.Core.Util.Event
LogMessage'FATALProto.Tensorflow.Core.Util.Event
LogMessage'INFOProto.Tensorflow.Core.Util.Event
LogMessage'LevelProto.Tensorflow.Core.Util.Event
LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
LogMessage'WARNProto.Tensorflow.Core.Util.Event
logSoftmaxTensorFlow.GenOps.Core
logSoftmax'TensorFlow.GenOps.Core
logSummaryTensorFlow.Logging
logUniformCandidateSamplerTensorFlow.GenOps.Core
logUniformCandidateSampler'TensorFlow.GenOps.Core
lookupNodeTensorFlow.Build
lookupTableExportTensorFlow.GenOps.Core
lookupTableExport'TensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableFind'TensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableImport'TensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableInsert'TensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
lookupTableSize'TensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
loopCond'TensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRN'TensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
lRNGrad'TensorFlow.GenOps.Core
makeQueueTensorFlow.Queue
matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matMul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
matTranspose'TensorFlow.Ops
max 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
maybe'audioProto.Tensorflow.Core.Framework.Summary
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'fileVersionProto.Tensorflow.Core.Util.Event
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphDefProto.Tensorflow.Core.Util.Event
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'histoProto.Tensorflow.Core.Framework.Summary
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'imageProto.Tensorflow.Core.Framework.Summary
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'logMessageProto.Tensorflow.Core.Util.Event
maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'sessionLogProto.Tensorflow.Core.Util.Event
maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'summaryProto.Tensorflow.Core.Util.Event
maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
maybe'tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
maybe'versionsProto.Tensorflow.Core.Framework.Graph
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mean' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeAllSummariesTensorFlow.Logging
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
messageProto.Tensorflow.Core.Util.Event
metaGraphDefProto.Tensorflow.Core.Util.Event
min 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
min'TensorFlow.GenOps.Core
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
msgProto.Tensorflow.Core.Util.Event
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
neg' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
negTrain'TensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nextIteration'TensorFlow.GenOps.Core
NilTensorFlow.Types
nodeProto.Tensorflow.Core.Framework.Graph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeNameProto.Tensorflow.Core.Framework.Summary
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
nonMaxSuppression'TensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
noOp'TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
notEqual'TensorFlow.GenOps.Core
numProto.Tensorflow.Core.Framework.Summary
numberAttrProto.Tensorflow.Core.Framework.OpDef
numChannelsProto.Tensorflow.Core.Framework.Summary
numThreadsProto.Tensorflow.Core.Protobuf.Config
obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
oneHot' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputTensorFlow.Output
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputNodeNameTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
pack' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
pad'TensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
paddingFIFOQueue'TensorFlow.GenOps.Core
paddingFIFOQueueV2TensorFlow.GenOps.Core
paddingFIFOQueueV2'TensorFlow.GenOps.Core
parallelConcatTensorFlow.GenOps.Core
parallelConcat'TensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parameterizedTruncatedNormal'TensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseExampleTensorFlow.GenOps.Core
parseExample'TensorFlow.GenOps.Core
parseOpTensorFlow.OpGen.ParsedOp
parseSingleSequenceExampleTensorFlow.GenOps.Core
parseSingleSequenceExample'TensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
parseTensor'TensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
PendingNodeNameTensorFlow.Output
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholder' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
placeholderV2TensorFlow.GenOps.Core
placeholderV2'TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placeholderWithDefault'TensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
polygamma'TensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
pow'TensorFlow.GenOps.Core
prefixTensorFlow.OpGen
preventGradientTensorFlow.GenOps.Core
preventGradient'TensorFlow.GenOps.Core
printTensorFlow.GenOps.Core
print'TensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
priorityQueue'TensorFlow.GenOps.Core
priorityQueueV2TensorFlow.GenOps.Core
priorityQueueV2'TensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
prod'TensorFlow.GenOps.Core
protoShapeTensorFlow.Types
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putTFRecordTensorFlow.Records
putTFRecordDataTensorFlow.Records
putTFRecordLengthTensorFlow.Records
putVarIntTensorFlow.Internal.VarInt
qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
QueueTensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
range' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
Ref 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reluGrad' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reshape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdamTensorFlow.GenOps.Core
resourceApplyAdam'TensorFlow.GenOps.Core
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
ResourceArgTensorFlow.OpGen.ParsedOp
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
ResourceHandle 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
4 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restore'TensorFlow.GenOps.Core
restoreFromNameTensorFlow.Ops
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
RPCOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
rpcOptionsProto.Tensorflow.Core.Protobuf.Config
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
runMetadataProto.Tensorflow.Core.Util.Event
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
sampleRateProto.Tensorflow.Core.Framework.Summary
save 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
save'TensorFlow.GenOps.Core
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
Scalar 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
scalarTensorFlow.Ops
scalar'TensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
SessionLog 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
sessionLogProto.Tensorflow.Core.Util.Event
SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
SessionLog'STARTProto.Tensorflow.Core.Util.Event
SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
SessionLog'STOPProto.Tensorflow.Core.Util.Event
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
shape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
SimpleArgTensorFlow.OpGen.ParsedOp
simpleValueProto.Tensorflow.Core.Framework.Summary
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sinkTFRecordsTensorFlow.Records.Conduit
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.TensorShape
size' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
slice'TensorFlow.GenOps.Core
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
sourceTFRecordsTensorFlow.Records.Conduit
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToDense' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
statusProto.Tensorflow.Core.Util.Event
stepProto.Tensorflow.Core.Util.Event
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sub' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.Summary
sum' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summariesTensorFlow.Build
Summary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summary 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Util.Event
Summary'Audio 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Image 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryTensor 
1 (Type/Class)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Logging
sumSquaresProto.Tensorflow.Core.Framework.Summary
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
tag 
1 (Function)Proto.Tensorflow.Core.Util.Event
2 (Function)Proto.Tensorflow.Core.Framework.Summary
TaggedRunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
taggedRunMetadataProto.Tensorflow.Core.Util.Event
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
tensorShapeProto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
toBuildTensorFlow.Tensor
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
transpose' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncatedNormal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeHintProto.Tensorflow.Core.Framework.Summary
typeListAttrProto.Tensorflow.Core.Framework.OpDef
TypeParam 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
typeParamIsListTensorFlow.OpGen.ParsedOp
typeParamRestrictionsTensorFlow.OpGen.ParsedOp
unControlNodeTensorFlow.Output, TensorFlow.Build
unHaskellNameTensorFlow.OpGen.ParsedOp
uniformCandidateSamplerTensorFlow.GenOps.Core
uniformCandidateSampler'TensorFlow.GenOps.Core
UniqueTensorFlow.Build
uniqueTensorFlow.GenOps.Core
unique'TensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
uniqueWithCounts'TensorFlow.GenOps.Core
unknownRankProto.Tensorflow.Core.Framework.TensorShape
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unpackTensorFlow.GenOps.Core
unpack'TensorFlow.GenOps.Core
unScalarTensorFlow.Types, TensorFlow.Core
unsortedSegmentSumTensorFlow.GenOps.Core
unsortedSegmentSum'TensorFlow.GenOps.Core
unstageTensorFlow.GenOps.Core
unstage'TensorFlow.GenOps.Core
unTensorDataTensorFlow.Types
unTFNameTensorFlow.OpGen.ParsedOp
usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
Value 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
value 
1 (Function)TensorFlow.Tensor, TensorFlow.Core
2 (Function)Proto.Tensorflow.Core.Protobuf.Config
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
5 (Function)Proto.Tensorflow.Core.Framework.Summary
varHandleOpTensorFlow.GenOps.Core
varHandleOp'TensorFlow.GenOps.Core
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
variable' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
variableV2TensorFlow.GenOps.Core
variableV2'TensorFlow.GenOps.Core
varIsInitializedOpTensorFlow.GenOps.Core
varIsInitializedOp'TensorFlow.GenOps.Core
vectorTensorFlow.Ops
vector'TensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
versionNumberProto.Tensorflow.Core.Framework.Tensor
versionsProto.Tensorflow.Core.Framework.Graph
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
wallTimeProto.Tensorflow.Core.Util.Event
where'TensorFlow.GenOps.Core
where''TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
wholeFileReader'TensorFlow.GenOps.Core
wholeFileReaderV2TensorFlow.GenOps.Core
wholeFileReaderV2'TensorFlow.GenOps.Core
widthProto.Tensorflow.Core.Framework.Summary
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withEventWriterTensorFlow.Logging
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
writeFileTensorFlow.GenOps.Core
writeFile'TensorFlow.GenOps.Core
wtsCkptTensorFlow.Examples.MNIST.TrainedGraph
zeroInitializedVariableTensorFlow.Ops
zeroInitializedVariable'TensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zerosLike' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
zeta'TensorFlow.GenOps.Core
\\TensorFlow.Types
_ArgTensorFlow.GenOps.Core
_Arg'TensorFlow.GenOps.Core
_ArrayToListTensorFlow.GenOps.Core
_ArrayToList'TensorFlow.GenOps.Core
_AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
_AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
_ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
_Event'fileVersionProto.Tensorflow.Core.Util.Event
_Event'graphDefProto.Tensorflow.Core.Util.Event
_Event'logMessageProto.Tensorflow.Core.Util.Event
_Event'metaGraphDefProto.Tensorflow.Core.Util.Event
_Event'sessionLogProto.Tensorflow.Core.Util.Event
_Event'stepProto.Tensorflow.Core.Util.Event
_Event'summaryProto.Tensorflow.Core.Util.Event
_Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
_Event'wallTimeProto.Tensorflow.Core.Util.Event
_GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
_GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
_GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
_GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionProto.Tensorflow.Core.Framework.Graph
_GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
_GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
_GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
_HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
_HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
_HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
_HistogramProto'minProto.Tensorflow.Core.Framework.Summary
_HistogramProto'numProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
_HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
_HostCastTensorFlow.GenOps.Core
_HostCast'TensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostRecv'TensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_HostSend'TensorFlow.GenOps.Core
_ListToArrayTensorFlow.GenOps.Core
_ListToArray'TensorFlow.GenOps.Core
_LogMessage'levelProto.Tensorflow.Core.Util.Event
_LogMessage'messageProto.Tensorflow.Core.Util.Event
_NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
_NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
_NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
_NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
_OpDef'attrProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
_OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
_OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
_OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
_OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
_OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
_OpDef'nameProto.Tensorflow.Core.Framework.OpDef
_OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
_OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
_OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
_opInputsTensorFlow.Output
_OpList'opProto.Tensorflow.Core.Framework.OpDef
_opNameTensorFlow.Output
_OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
_OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
_opTypeTensorFlow.Output
_ParallelConcatStartTensorFlow.GenOps.Core
_ParallelConcatStart'TensorFlow.GenOps.Core
_ParallelConcatUpdateTensorFlow.GenOps.Core
_ParallelConcatUpdate'TensorFlow.GenOps.Core
_RecvTensorFlow.GenOps.Core
_Recv'TensorFlow.GenOps.Core
_ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
_ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
_RetvalTensorFlow.GenOps.Core
_Retval'TensorFlow.GenOps.Core
_RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
_RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
_RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
_SendTensorFlow.GenOps.Core
_Send'TensorFlow.GenOps.Core
_SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
_SessionLog'msgProto.Tensorflow.Core.Util.Event
_SessionLog'statusProto.Tensorflow.Core.Util.Event
_Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
_Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
_Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
_Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
_Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
_Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
_Summary'valueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
_Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
_Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
_Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
_Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
_Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
_Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
_SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
_TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
_TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
_TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
_TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
_TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
_TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
_TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
_ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file
diff --git a/docs/haddock/doc-index-B.html b/docs/haddock/doc-index-B.html
index e78a069..425f2a2 100644
--- a/docs/haddock/doc-index-B.html
+++ b/docs/haddock/doc-index-B.html
@@ -1,4 +1,4 @@
(Index - B)

Index - B

bProto.Tensorflow.Core.Framework.AttrValue
barrierTensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bitcastTensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Session, TensorFlow.Core
buildAndTensorFlow.Session, TensorFlow.Core
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
buildListOpTensorFlow.BuildOp
BuildOpTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
buildWithSummaryTensorFlow.Session, TensorFlow.Core
\ No newline at end of file
+

Index - B

bProto.Tensorflow.Core.Framework.AttrValue
barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
biasCkptTensorFlow.Examples.MNIST.TrainedGraph
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
boolValProto.Tensorflow.Core.Framework.Tensor
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgs 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
broadcastGradientArgs' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
bucketProto.Tensorflow.Core.Framework.Summary
bucketLimitProto.Tensorflow.Core.Framework.Summary
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
buildCostModelProto.Tensorflow.Core.Protobuf.Config
buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-C.html b/docs/haddock/doc-index-C.html
index 57b6593..7f91a62 100644
--- a/docs/haddock/doc-index-C.html
+++ b/docs/haddock/doc-index-C.html
@@ -1,4 +1,4 @@
(Index - C)

Index - C

camelCaseTensorFlow.OpGen.ParsedOp
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ceilTensorFlow.GenOps.Core
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
choleskyTensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
collectAllSummariesTensorFlow.Build
colocateWithTensorFlow.Build, TensorFlow.Core
complexTensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatOffsetTensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
constantTensorFlow.Ops
containerProto.Tensorflow.Core.Framework.ResourceHandle
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
countUpToTensorFlow.GenOps.Core
createVariableOpTensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - C

camelCaseTensorFlow.OpGen.ParsedOp
cast 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
cast' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
checkEndianTensorFlow.Examples.MNIST.Parse
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
checkpointPathProto.Tensorflow.Core.Util.Event
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
collectAllSummariesTensorFlow.Tensor
colocateWithTensorFlow.Tensor, TensorFlow.Core
colorspaceProto.Tensorflow.Core.Framework.Summary
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
concat 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concat' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
ConfigProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
ConfigProto'DeviceCountEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
constantTensorFlow.Ops
constant'TensorFlow.Ops
containerProto.Tensorflow.Core.Framework.ResourceHandle
contentTypeProto.Tensorflow.Core.Framework.Summary
ControlNode 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Build
2 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copy'TensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
copyHost'TensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
costGraphProto.Tensorflow.Core.Protobuf.Config
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-D.html b/docs/haddock/doc-index-D.html
index 7285622..cdd4c2c 100644
--- a/docs/haddock/doc-index-D.html
+++ b/docs/haddock/doc-index-D.html
@@ -1,4 +1,4 @@
(Index - D)

Index - D

DataTypeProto.Tensorflow.Core.Framework.Types
dcomplexValProto.Tensorflow.Core.Framework.Tensor
debugIdentityTensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugOpsProto.Tensorflow.Core.Protobuf.Config
DebugTensorWatch 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
debugUrlsProto.Tensorflow.Core.Protobuf.Config
decodeBase64TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
defaultValueProto.Tensorflow.Core.Framework.OpDef
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceNameTensorFlow.Output, TensorFlow.Core
diagTensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
divTensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
drawBoundingBoxesTensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtypeProto.Tensorflow.Core.Framework.Tensor
DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
DT_BOOLProto.Tensorflow.Core.Framework.Types
DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
DT_DOUBLEProto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
DT_FLOATProto.Tensorflow.Core.Framework.Types
DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
DT_HALFProto.Tensorflow.Core.Framework.Types
DT_HALF_REFProto.Tensorflow.Core.Framework.Types
DT_INT16Proto.Tensorflow.Core.Framework.Types
DT_INT16_REFProto.Tensorflow.Core.Framework.Types
DT_INT32Proto.Tensorflow.Core.Framework.Types
DT_INT32_REFProto.Tensorflow.Core.Framework.Types
DT_INT64Proto.Tensorflow.Core.Framework.Types
DT_INT64_REFProto.Tensorflow.Core.Framework.Types
DT_INT8Proto.Tensorflow.Core.Framework.Types
DT_INT8_REFProto.Tensorflow.Core.Framework.Types
DT_INVALIDProto.Tensorflow.Core.Framework.Types
DT_QINT16Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
DT_QINT32Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
DT_QINT8Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
DT_QUINT16Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
DT_QUINT8Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
DT_RESOURCEProto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
DT_STRINGProto.Tensorflow.Core.Framework.Types
DT_STRING_REFProto.Tensorflow.Core.Framework.Types
DT_UINT16Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
DT_UINT8Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - D

DataType 
1 (Type/Class)TensorFlow.Types
2 (Type/Class)Proto.Tensorflow.Core.Framework.Types
dcomplexValProto.Tensorflow.Core.Framework.Tensor
debugIdentityTensorFlow.GenOps.Core
debugIdentity'TensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugNanCount'TensorFlow.GenOps.Core
debugNumericSummaryTensorFlow.GenOps.Core
debugNumericSummary'TensorFlow.GenOps.Core
debugOptionsProto.Tensorflow.Core.Protobuf.Config
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
decodeTensorDataTensorFlow.Types, TensorFlow.Core
decodeTFRecordsTensorFlow.Records.Conduit
defaultValueProto.Tensorflow.Core.Framework.OpDef
deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
DeleteTensorFlow.Types
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
deprecationProto.Tensorflow.Core.Framework.OpDef
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
dequeueTensorFlow.Queue
descriptionProto.Tensorflow.Core.Framework.OpDef
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
Device 
1 (Data Constructor)TensorFlow.Output, TensorFlow.Core
2 (Type/Class)TensorFlow.Output, TensorFlow.Core
device 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
deviceCountProto.Tensorflow.Core.Protobuf.Config
deviceFiltersProto.Tensorflow.Core.Protobuf.Config
deviceNameTensorFlow.Output, TensorFlow.Core
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
dimProto.Tensorflow.Core.Framework.TensorShape
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
docOpListTensorFlow.OpGen
doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
doubleValProto.Tensorflow.Core.Framework.Tensor
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
drawMNISTTensorFlow.Examples.MNIST.Parse
dtypeProto.Tensorflow.Core.Framework.Tensor
DT_BFLOAT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BFLOAT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_BOOL_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX128_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_COMPLEX64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_DOUBLE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_FLOAT_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_HALF_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT64_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_INVALID 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT32_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_QUINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_RESOURCE_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_STRING_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT16_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
DT_UINT8_REF 
1 (Data Constructor)TensorFlow.Types
2 (Data Constructor)Proto.Tensorflow.Core.Framework.Types
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-E.html b/docs/haddock/doc-index-E.html
index 2d6a3cf..9433bf4 100644
--- a/docs/haddock/doc-index-E.html
+++ b/docs/haddock/doc-index-E.html
@@ -1,4 +1,4 @@
(Index - E)

Index - E

editDistanceTensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeJpegTensorFlow.GenOps.Core
encodePngTensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
enqueueTensorFlow.Queue
enterTensorFlow.GenOps.Core
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extractGlimpseTensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - E

editDistanceTensorFlow.GenOps.Core
editDistance'TensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
elu'TensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
eluGrad'TensorFlow.GenOps.Core
embeddingLookupTensorFlow.EmbeddingOps
enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
encodeBase64TensorFlow.GenOps.Core
encodeBase64'TensorFlow.GenOps.Core
encodedAudioStringProto.Tensorflow.Core.Framework.Summary
encodedImageStringProto.Tensorflow.Core.Framework.Summary
encodeJpegTensorFlow.GenOps.Core
encodeJpeg'TensorFlow.GenOps.Core
encodeOutputTensorFlow.Build
encodePngTensorFlow.GenOps.Core
encodePng'TensorFlow.GenOps.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
encodeTFRecordsTensorFlow.Records.Conduit
enqueueTensorFlow.Queue
enterTensorFlow.GenOps.Core
enter'TensorFlow.GenOps.Core
eqLengthGuardTensorFlow.BuildOp
equal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
equal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
erfTensorFlow.GenOps.Core
erf'TensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
erfc'TensorFlow.GenOps.Core
evalBuildTTensorFlow.Build
Event 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
EventWriterTensorFlow.Logging
ExcludedCaseTensorFlow.Types
excludeListTensorFlow.OpGen
exitTensorFlow.GenOps.Core
exit'TensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
exp'TensorFlow.GenOps.Core
expandDims 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
expandDims' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
explanationProto.Tensorflow.Core.Framework.OpDef
explicitInputAttrsTensorFlow.OpGen.ParsedOp
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
expm1TensorFlow.GenOps.Core
expm1'TensorFlow.GenOps.Core
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
extractGlimpseTensorFlow.GenOps.Core
extractGlimpse'TensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
extractImagePatches'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-F.html b/docs/haddock/doc-index-F.html
index b5d3d9d..a461543 100644
--- a/docs/haddock/doc-index-F.html
+++ b/docs/haddock/doc-index-F.html
@@ -1,4 +1,4 @@
(Index - F)

Index - F

fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorListTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
funcProto.Tensorflow.Core.Framework.AttrValue
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - F

fProto.Tensorflow.Core.Framework.AttrValue
factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
Feed 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Data Constructor)TensorFlow.Nodes
2 (Type/Class)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
fileVersionProto.Tensorflow.Core.Util.Event
fill 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fill' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
flagParserTensorFlow.OpGen
floatValProto.Tensorflow.Core.Framework.Tensor
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
funcProto.Tensorflow.Core.Framework.AttrValue
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-G.html b/docs/haddock/doc-index-G.html
index 7a477b3..2cecaf0 100644
--- a/docs/haddock/doc-index-G.html
+++ b/docs/haddock/doc-index-G.html
@@ -1,4 +1,4 @@
(Index - G)

Index - G

gatherTensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getSessionHandleTensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getVarIntTensorFlow.Internal.VarInt
googleTestGoogle.Test
GPUOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
gpuOptionsProto.Tensorflow.Core.Protobuf.Config
gradientsTensorFlow.Gradient
GraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
GraphOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
graphOptionsProto.Tensorflow.Core.Protobuf.Config
GraphStateTensorFlow.Build
greaterTensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
groupTensorFlow.ControlFlow, TensorFlow.Core
\ No newline at end of file
+

Index - G

gatherTensorFlow.GenOps.Core
gather'TensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
gatherNd'TensorFlow.GenOps.Core
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getSessionHandleTensorFlow.GenOps.Core
getSessionHandle'TensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getSessionTensor'TensorFlow.GenOps.Core
getTFRecordTensorFlow.Records
getTFRecordDataTensorFlow.Records
getTFRecordLengthTensorFlow.Records
getTFRecordsTensorFlow.Records
getVarIntTensorFlow.Internal.VarInt
globalJitLevelProto.Tensorflow.Core.Protobuf.Config
GPUOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
gpuOptionsProto.Tensorflow.Core.Protobuf.Config
gradientsTensorFlow.Gradient
GraphDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
2 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
graphDefProto.Tensorflow.Core.Util.Event
GraphOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
graphOptionsProto.Tensorflow.Core.Protobuf.Config
GraphStateTensorFlow.Build
greaterTensorFlow.GenOps.Core
greater'TensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
greaterEqual'TensorFlow.GenOps.Core
groupTensorFlow.ControlFlow, TensorFlow.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-H.html b/docs/haddock/doc-index-H.html
index 699071f..2d76199 100644
--- a/docs/haddock/doc-index-H.html
+++ b/docs/haddock/doc-index-H.html
@@ -1,4 +1,4 @@
(Index - H)

Index - H

halfValProto.Tensorflow.Core.Framework.Tensor
hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
HaskellName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
haskellNameTensorFlow.OpGen.ParsedOp
hasMinimumProto.Tensorflow.Core.Framework.OpDef
histogramSummaryTensorFlow.GenOps.Core
hoistBuildTTensorFlow.Build
hSVToRGBTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - H

halfValProto.Tensorflow.Core.Framework.Tensor
hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
hashTableTensorFlow.GenOps.Core
hashTable'TensorFlow.GenOps.Core
HaskellName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
haskellNameTensorFlow.OpGen.ParsedOp
hasMinimumProto.Tensorflow.Core.Framework.OpDef
heightProto.Tensorflow.Core.Framework.Summary
histoProto.Tensorflow.Core.Framework.Summary
HistogramProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
histogramSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
histogramSummary'TensorFlow.GenOps.Core
hoistBuildTTensorFlow.Build
hSVToRGBTensorFlow.GenOps.Core
hSVToRGB'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-I.html b/docs/haddock/doc-index-I.html
index 92ed0dc..832c83a 100644
--- a/docs/haddock/doc-index-I.html
+++ b/docs/haddock/doc-index-I.html
@@ -1,4 +1,4 @@
(Index - I)

Index - I

iProto.Tensorflow.Core.Framework.AttrValue
identity 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
identityReaderTensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imageSummaryTensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedVariableTensorFlow.Ops
initializeTableTensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
int64ValProto.Tensorflow.Core.Framework.Tensor
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFiniteTensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - I

iProto.Tensorflow.Core.Framework.AttrValue
identity 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identity' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
identityReaderTensorFlow.GenOps.Core
identityReader'TensorFlow.GenOps.Core
identityReaderV2TensorFlow.GenOps.Core
identityReaderV2'TensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT'TensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT2D'TensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
iFFT3D'TensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igamma'TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
igammac'TensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imag'TensorFlow.GenOps.Core
imageProto.Tensorflow.Core.Framework.Summary
imageSummaryTensorFlow.GenOps.Core
imageSummary'TensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
immutableConst'TensorFlow.GenOps.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
inferredTypeAttrsTensorFlow.OpGen.ParsedOp
inferShapesProto.Tensorflow.Core.Protobuf.Config
initializedVariableTensorFlow.Ops
initializedVariable'TensorFlow.Ops
initializeTableTensorFlow.GenOps.Core
initializeTable'TensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
initializeTableFromTextFile'TensorFlow.GenOps.Core
inputProto.Tensorflow.Core.Framework.NodeDef
inputArgProto.Tensorflow.Core.Framework.OpDef
int64ValProto.Tensorflow.Core.Framework.Tensor
interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
inTopKTensorFlow.GenOps.Core
inTopK'TensorFlow.GenOps.Core
intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
intValProto.Tensorflow.Core.Framework.Tensor
invTensorFlow.GenOps.Core
inv'TensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invertPermutation'TensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
invGrad'TensorFlow.GenOps.Core
isAggregateProto.Tensorflow.Core.Framework.OpDef
isCommutativeProto.Tensorflow.Core.Framework.OpDef
isFiniteTensorFlow.GenOps.Core
isFinite'TensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isInf'TensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isNan'TensorFlow.GenOps.Core
isRefProto.Tensorflow.Core.Framework.OpDef
isStatefulProto.Tensorflow.Core.Framework.OpDef
isVariableInitializedTensorFlow.GenOps.Core
isVariableInitialized'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-K.html b/docs/haddock/doc-index-K.html
index d84e5d6..95697fc 100644
--- a/docs/haddock/doc-index-K.html
+++ b/docs/haddock/doc-index-K.html
@@ -1,4 +1,4 @@
(Index - K)

Index - K

key 
1 (Function)Proto.Tensorflow.Core.Protobuf.Config
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
\ No newline at end of file
+

Index - K

key 
1 (Function)Proto.Tensorflow.Core.Protobuf.Config
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
\ No newline at end of file
diff --git a/docs/haddock/doc-index-L.html b/docs/haddock/doc-index-L.html
index bc49877..b03681b 100644
--- a/docs/haddock/doc-index-L.html
+++ b/docs/haddock/doc-index-L.html
@@ -1,4 +1,4 @@
(Index - L)

Index - L

l2LossTensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
lessTensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lgammaTensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linSpaceTensorFlow.GenOps.Core
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
logTensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logicalAndTensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logSoftmaxTensorFlow.GenOps.Core
logUniformCandidateSamplerTensorFlow.GenOps.Core
lookupTableExportTensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - L

l2LossTensorFlow.GenOps.Core
l2Loss'TensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
lengthFramesProto.Tensorflow.Core.Framework.Summary
lessTensorFlow.GenOps.Core
less'TensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lessEqual'TensorFlow.GenOps.Core
levelProto.Tensorflow.Core.Util.Event
lgammaTensorFlow.GenOps.Core
lgamma'TensorFlow.GenOps.Core
libraryProto.Tensorflow.Core.Framework.Graph
linSpaceTensorFlow.GenOps.Core
linSpace'TensorFlow.GenOps.Core
ListTensorFlow.Types
listProto.Tensorflow.Core.Framework.AttrValue
ListArgTensorFlow.OpGen.ParsedOp
listDiffTensorFlow.GenOps.Core
listDiff'TensorFlow.GenOps.Core
ListOfTensorFlow.Types
logTensorFlow.GenOps.Core
log'TensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
log1p'TensorFlow.GenOps.Core
logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
logEventTensorFlow.Logging
logicalAndTensorFlow.GenOps.Core
logicalAnd'TensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalNot'TensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logicalOr'TensorFlow.GenOps.Core
LogMessage 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
logMessageProto.Tensorflow.Core.Util.Event
LogMessage'DEBUGProto.Tensorflow.Core.Util.Event
LogMessage'ERRORProto.Tensorflow.Core.Util.Event
LogMessage'FATALProto.Tensorflow.Core.Util.Event
LogMessage'INFOProto.Tensorflow.Core.Util.Event
LogMessage'LevelProto.Tensorflow.Core.Util.Event
LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
LogMessage'WARNProto.Tensorflow.Core.Util.Event
logSoftmaxTensorFlow.GenOps.Core
logSoftmax'TensorFlow.GenOps.Core
logSummaryTensorFlow.Logging
logUniformCandidateSamplerTensorFlow.GenOps.Core
logUniformCandidateSampler'TensorFlow.GenOps.Core
lookupNodeTensorFlow.Build
lookupTableExportTensorFlow.GenOps.Core
lookupTableExport'TensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableFind'TensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableImport'TensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableInsert'TensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
lookupTableSize'TensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
loopCond'TensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRN'TensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
lRNGrad'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-M.html b/docs/haddock/doc-index-M.html
index d074a83..b05e1fc 100644
--- a/docs/haddock/doc-index-M.html
+++ b/docs/haddock/doc-index-M.html
@@ -1,4 +1,4 @@
(Index - M)

Index - M

makeQueue2TensorFlow.Queue
matchingFilesTensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
maxTensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'tensorProto.Tensorflow.Core.Framework.AttrValue
maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
maybe'versionsProto.Tensorflow.Core.Framework.Graph
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mergeTensorFlow.GenOps.Core
mergeSummaryTensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
minTensorFlow.GenOps.Core
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
mirrorPadTensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
\ No newline at end of file
+

Index - M

makeQueueTensorFlow.Queue
matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matMul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
matTransposeTensorFlow.Ops
matTranspose'TensorFlow.Ops
max 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
maybe'audioProto.Tensorflow.Core.Framework.Summary
maybe'bProto.Tensorflow.Core.Framework.AttrValue
maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
maybe'fProto.Tensorflow.Core.Framework.AttrValue
maybe'fileVersionProto.Tensorflow.Core.Util.Event
maybe'funcProto.Tensorflow.Core.Framework.AttrValue
maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'graphDefProto.Tensorflow.Core.Util.Event
maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'histoProto.Tensorflow.Core.Framework.Summary
maybe'iProto.Tensorflow.Core.Framework.AttrValue
maybe'imageProto.Tensorflow.Core.Framework.Summary
maybe'libraryProto.Tensorflow.Core.Framework.Graph
maybe'listProto.Tensorflow.Core.Framework.AttrValue
maybe'logMessageProto.Tensorflow.Core.Util.Event
maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
maybe'sProto.Tensorflow.Core.Framework.AttrValue
maybe'sessionLogProto.Tensorflow.Core.Util.Event
maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
maybe'summaryProto.Tensorflow.Core.Util.Event
maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
maybe'tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
maybe'value 
1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
maybe'versionsProto.Tensorflow.Core.Framework.Graph
maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
mean 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mean' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeAllSummariesTensorFlow.Logging
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
messageProto.Tensorflow.Core.Util.Event
metaGraphDefProto.Tensorflow.Core.Util.Event
min 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.Summary
min'TensorFlow.GenOps.Core
minimum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
MixedListArgTensorFlow.OpGen.ParsedOp
MNISTTensorFlow.Examples.MNIST.Parse
mnistPbTensorFlow.Examples.MNIST.TrainedGraph
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
msgProto.Tensorflow.Core.Util.Event
mul 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
mul' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/doc-index-N.html b/docs/haddock/doc-index-N.html
index 0f7801c..886aa59 100644
--- a/docs/haddock/doc-index-N.html
+++ b/docs/haddock/doc-index-N.html
@@ -1,4 +1,4 @@
(Index - N)

Index - N

Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
namedTensorFlow.ControlFlow, TensorFlow.Core
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nodeProto.Tensorflow.Core.Framework.Graph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeNameProto.Tensorflow.Core.Protobuf.Config
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
numberAttrProto.Tensorflow.Core.Framework.OpDef
numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file +


Index - N

Name 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
name 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
4 (Function)Proto.Tensorflow.Core.Framework.TensorShape
5 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
NameAttrList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
NameAttrList'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
2 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
neg 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
neg' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
negTrainTensorFlow.GenOps.Core
negTrain'TensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nextIteration'TensorFlow.GenOps.Core
NilTensorFlow.Types
nodeProto.Tensorflow.Core.Framework.Graph
NodeDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeDef'AttrEntry 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
NodeName 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
nodeNameProto.Tensorflow.Core.Framework.Summary
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
nonMaxSuppressionTensorFlow.GenOps.Core
nonMaxSuppression'TensorFlow.GenOps.Core
noOp 
1 (Function)TensorFlow.ControlFlow, TensorFlow.Core
2 (Function)TensorFlow.GenOps.Core
noOp'TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
notEqual'TensorFlow.GenOps.Core
numProto.Tensorflow.Core.Framework.Summary
numberAttrProto.Tensorflow.Core.Framework.OpDef
numChannelsProto.Tensorflow.Core.Framework.Summary
numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file diff --git a/docs/haddock/doc-index-O.html b/docs/haddock/doc-index-O.html index 00cba33..0aba0f2 100644 --- a/docs/haddock/doc-index-O.html +++ b/docs/haddock/doc-index-O.html @@ -1,4 +1,4 @@ (Index - O)


Index - O

oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OpTensorFlow.Output
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
opAttrTensorFlow.Output, TensorFlow.Build
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build
OpResultTensorFlow.BuildOp
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
opUnrenderedTensorFlow.Output
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputTensorFlow.Output
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputOpTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
outputSlotProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file +


Index - O

obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
oneHot 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
oneHot' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
op 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
4 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefTensorFlow.Build
OpDef'ArgDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
OpDef'AttrDef 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opDefWithNameTensorFlow.Build
OpDeprecation 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
OpGenFlags 
1 (Data Constructor)TensorFlow.OpGen
2 (Type/Class)TensorFlow.OpGen
opInputsTensorFlow.Output, TensorFlow.Build
OpList 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
2 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
OptimizerOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
OptionsTensorFlow.Session, TensorFlow.Core
optLevelProto.Tensorflow.Core.Protobuf.Config
OpType 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
Output 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputTensorFlow.Output
outputArgProto.Tensorflow.Core.Framework.OpDef
outputFileTensorFlow.OpGen
outputIndexTensorFlow.Output
OutputIx 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
outputNodeNameTensorFlow.Output
outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file diff --git a/docs/haddock/doc-index-P.html b/docs/haddock/doc-index-P.html index f44e14d..ff91dfa 100644 --- a/docs/haddock/doc-index-P.html +++ b/docs/haddock/doc-index-P.html @@ -1,4 +1,4 @@ (Index - P)


Index - P

pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgKindTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseOpTensorFlow.OpGen.ParsedOp
parseTensorTensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
PendingNodeNameTensorFlow.Output
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholderV2TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
prefixTensorFlow.OpGen
priorityQueueTensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
protoShapeTensorFlow.Types
putVarIntTensorFlow.Internal.VarInt
\ No newline at end of file +


Index - P

pack 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
pack' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
padTensorFlow.GenOps.Core
pad'TensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
paddingFIFOQueue'TensorFlow.GenOps.Core
paddingFIFOQueueV2TensorFlow.GenOps.Core
paddingFIFOQueueV2'TensorFlow.GenOps.Core
parallelConcatTensorFlow.GenOps.Core
parallelConcat'TensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parameterizedTruncatedNormal'TensorFlow.GenOps.Core
ParsedArg 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
ParsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgCaseTensorFlow.OpGen.ParsedOp
parsedArgDescriptionTensorFlow.OpGen.ParsedOp
parsedArgNameTensorFlow.OpGen.ParsedOp
parsedInputsTensorFlow.OpGen.ParsedOp
ParsedOp 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
parsedOpDescriptionTensorFlow.OpGen.ParsedOp
parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
parsedOpNameTensorFlow.OpGen.ParsedOp
parsedOpSummaryTensorFlow.OpGen.ParsedOp
parsedOutputsTensorFlow.OpGen.ParsedOp
parseExampleTensorFlow.GenOps.Core
parseExample'TensorFlow.GenOps.Core
parseOpTensorFlow.OpGen.ParsedOp
parseSingleSequenceExampleTensorFlow.GenOps.Core
parseSingleSequenceExample'TensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
parseTensor'TensorFlow.GenOps.Core
partitionGraphsProto.Tensorflow.Core.Protobuf.Config
PendingNodeNameTensorFlow.Output
perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
placeholder 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
placeholder' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
placeholderV2TensorFlow.GenOps.Core
placeholderV2'TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placeholderWithDefault'TensorFlow.GenOps.Core
placementPeriodProto.Tensorflow.Core.Protobuf.Config
placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
polygammaTensorFlow.GenOps.Core
polygamma'TensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
pow'TensorFlow.GenOps.Core
prefixTensorFlow.OpGen
preventGradientTensorFlow.GenOps.Core
preventGradient'TensorFlow.GenOps.Core
printTensorFlow.GenOps.Core
print'TensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
priorityQueue'TensorFlow.GenOps.Core
priorityQueueV2TensorFlow.GenOps.Core
priorityQueueV2'TensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
prod'TensorFlow.GenOps.Core
protoShapeTensorFlow.Types
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putTFRecordTensorFlow.Records
putTFRecordDataTensorFlow.Records
putTFRecordLengthTensorFlow.Records
putVarIntTensorFlow.Internal.VarInt
\ No newline at end of file diff --git a/docs/haddock/doc-index-Q.html b/docs/haddock/doc-index-Q.html index 9e48ff7..08edba9 100644 --- a/docs/haddock/doc-index-Q.html +++ b/docs/haddock/doc-index-Q.html @@ -1,4 +1,4 @@ (Index - Q)


Index - Q

quantizeAndDequantizeTensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
Queue2TensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
\ No newline at end of file +


Index - Q

qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
QueueTensorFlow.Queue
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-R.html b/docs/haddock/doc-index-R.html index 3e4cba9..c817ecb 100644 --- a/docs/haddock/doc-index-R.html +++ b/docs/haddock/doc-index-R.html @@ -1,4 +1,4 @@ (Index - R)


Index - R

randomCropTensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rankTensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readVariableOpTensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
RefTensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
RefKindTensorFlow.Tensor, TensorFlow.Core
refMergeTensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
renderTensorFlow.Build, TensorFlow.Core
RenderedTensorFlow.Output
renderedNodeDefsTensorFlow.Build
renderNodeNameTensorFlow.Build
renderOutputTensorFlow.Build
requantizationRangeTensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
ResourceHandle 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
4 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restoreFromNameTensorFlow.Ops
restoreSliceTensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
rsqrtTensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
\ No newline at end of file +


Index - R

randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
range 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
range' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readMessageFromFileOrDieTensorFlow.Examples.MNIST.Parse
readMNISTLabelsTensorFlow.Examples.MNIST.Parse
readMNISTSamplesTensorFlow.Examples.MNIST.Parse
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reducedShapeTensorFlow.Ops
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
Ref 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
relu 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGrad 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reluGrad' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
reshape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
reshape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdamTensorFlow.GenOps.Core
resourceApplyAdam'TensorFlow.GenOps.Core
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
ResourceArgTensorFlow.OpGen.ParsedOp
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
ResourceHandle 
1 (Data Constructor)TensorFlow.Output
2 (Type/Class)TensorFlow.Output
3 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
4 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
resourceHandleValProto.Tensorflow.Core.Framework.Tensor
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
restore 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
restore'TensorFlow.GenOps.Core
restoreFromNameTensorFlow.Ops
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
RPCOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
rpcOptionsProto.Tensorflow.Core.Protobuf.Config
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
run 
1 (Function)TensorFlow.Session, TensorFlow.Core
2 (Function)TensorFlow.Internal.FFI
runBuildTTensorFlow.Build
RunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
runMetadataProto.Tensorflow.Core.Util.Event
RunOptions 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-S.html b/docs/haddock/doc-index-S.html index c6ac5dc..5d4d4d9 100644 --- a/docs/haddock/doc-index-S.html +++ b/docs/haddock/doc-index-S.html @@ -1,4 +1,4 @@ (Index - S)


Index - S

sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
saveTensorFlow.Ops
Scalar 
1 (Data Constructor)TensorFlow.Nodes, TensorFlow.Core
2 (Type/Class)TensorFlow.Nodes, TensorFlow.Core
scalarTensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummaryTensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
shapeNTensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
SimpleArgTensorFlow.OpGen.ParsedOp
sinTensorFlow.GenOps.Core
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.TensorShape
sliceTensorFlow.GenOps.Core
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
spaceToBatchTensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
splitTensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summaryProto.Tensorflow.Core.Framework.OpDef
SummaryTensorTensorFlow.Build
svdTensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
\ No newline at end of file +


Index - S

sProto.Tensorflow.Core.Framework.AttrValue
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
sampleRateProto.Tensorflow.Core.Framework.Summary
save 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
save'TensorFlow.GenOps.Core
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
Scalar 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
scalarTensorFlow.Ops
scalar'TensorFlow.Ops
scalarizeTensorFlow.Ops
scalarSummary 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Logging
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
scomplexValProto.Tensorflow.Core.Framework.Tensor
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
Session 
1 (Type/Class)TensorFlow.Session, TensorFlow.Core
2 (Type/Class)TensorFlow.Internal.FFI
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
SessionLog 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
sessionLogProto.Tensorflow.Core.Util.Event
SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
SessionLog'STARTProto.Tensorflow.Core.Util.Event
SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
SessionLog'STOPProto.Tensorflow.Core.Util.Event
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
Shape 
1 (Data Constructor)TensorFlow.Types, TensorFlow.Core
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
shape 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
shape' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidCrossEntropyWithLogitsTensorFlow.NN
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
sign 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sign' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
SimpleArgTensorFlow.OpGen.ParsedOp
simpleValueProto.Tensorflow.Core.Framework.Summary
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sinkTFRecordsTensorFlow.Records.Conduit
size 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.TensorShape
size' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
slice'TensorFlow.GenOps.Core
softmax 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmax' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softmaxCrossEntropyWithLogits' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
sourceTFRecordsTensorFlow.Records.Conduit
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseToDense 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToDense' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
statusProto.Tensorflow.Core.Util.Event
stepProto.Tensorflow.Core.Util.Event
stepStatsProto.Tensorflow.Core.Protobuf.Config
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
stringValProto.Tensorflow.Core.Framework.Tensor
sub 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
sub' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sum 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
3 (Function)Proto.Tensorflow.Core.Framework.Summary
sum' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
summariesTensorFlow.Build
Summary 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
summary 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Util.Event
Summary'Audio 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Image 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
Summary'Value 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryDescription 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
2 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
SummaryTensor 
1 (Type/Class)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Logging
sumSquaresProto.Tensorflow.Core.Framework.Summary
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index-T.html b/docs/haddock/doc-index-T.html index 52a9a77..a70b346 100644 --- a/docs/haddock/doc-index-T.html +++ b/docs/haddock/doc-index-T.html @@ -1,4 +1,4 @@ (Index - T)


Index - T

takeManySparseFromTensorsMapTensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensorProto.Tensorflow.Core.Framework.AttrValue
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorAttrTensorFlow.Tensor, TensorFlow.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor, TensorFlow.Core
tensorKindTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefTypeTensorFlow.Types
tensorShapeProto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSummaryTensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypesTensorFlow.Types
tensorValTensorFlow.Types
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
textLineReaderTensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordReaderTensorFlow.GenOps.Core
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
topKTensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeListAttrProto.Tensorflow.Core.Framework.OpDef
\ No newline at end of file +


Index - T

tag 
1 (Function)Proto.Tensorflow.Core.Util.Event
2 (Function)Proto.Tensorflow.Core.Framework.Summary
TaggedRunMetadata 
1 (Data Constructor)Proto.Tensorflow.Core.Util.Event
2 (Type/Class)Proto.Tensorflow.Core.Util.Event
taggedRunMetadataProto.Tensorflow.Core.Util.Event
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
Tensor 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
tensor 
1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
2 (Function)Proto.Tensorflow.Core.Framework.Summary
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorContentProto.Tensorflow.Core.Framework.Tensor
TensorData 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types, TensorFlow.Core
3 (Data Constructor)TensorFlow.Internal.FFI
4 (Type/Class)TensorFlow.Internal.FFI
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Data Constructor)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
TensorProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
2 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
tensorShapeProto.Tensorflow.Core.Framework.Tensor
TensorShapeProto 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
TensorShapeProto'Dim 
1 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
2 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Data Constructor)TensorFlow.Types
2 (Type/Class)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
testImageDataTensorFlow.Examples.MNIST.InputData
testLabelDataTensorFlow.Examples.MNIST.InputData
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
TFName 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
tfNameTensorFlow.OpGen.ParsedOp
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
ThreadPoolOptionProto 
1 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
2 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
timelineStepProto.Tensorflow.Core.Protobuf.Config
timeoutInMsProto.Tensorflow.Core.Protobuf.Config
toBuildTensorFlow.Tensor
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
traceLevelProto.Tensorflow.Core.Protobuf.Config
trainingImageDataTensorFlow.Examples.MNIST.InputData
trainingLabelDataTensorFlow.Examples.MNIST.InputData
transpose 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
transpose' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormal 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncatedNormal' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
type' 
1 (Function)Proto.Tensorflow.Core.Framework.OpDef
2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
typeAttrProto.Tensorflow.Core.Framework.OpDef
TypeErrorTensorFlow.Types
typeHintProto.Tensorflow.Core.Framework.Summary
typeListAttrProto.Tensorflow.Core.Framework.OpDef
TypeParam 
1 (Data Constructor)TensorFlow.OpGen.ParsedOp
2 (Type/Class)TensorFlow.OpGen.ParsedOp
typeParamIsListTensorFlow.OpGen.ParsedOp
typeParamRestrictionsTensorFlow.OpGen.ParsedOp
\ No newline at end of file diff --git a/docs/haddock/doc-index-U.html b/docs/haddock/doc-index-U.html index 3ed0fac..ef799fe 100644 --- a/docs/haddock/doc-index-U.html +++ b/docs/haddock/doc-index-U.html @@ -1,4 +1,4 @@ (Index - U)

 

Index - U

unControlNodeTensorFlow.Output, TensorFlow.Build
unHaskellNameTensorFlow.OpGen.ParsedOp
uniformCandidateSamplerTensorFlow.GenOps.Core
UniqueTensorFlow.Build
uniqueTensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
unknownRankProto.Tensorflow.Core.Framework.TensorShape
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unpackTensorFlow.GenOps.Core
UnrenderedTensorFlow.Output
unScalarTensorFlow.Nodes, TensorFlow.Core
unsortedSegmentSumTensorFlow.GenOps.Core
unTensorDataTensorFlow.Types
unTFNameTensorFlow.OpGen.ParsedOp
usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
\ No newline at end of file +

 

Index - U

unControlNodeTensorFlow.Output, TensorFlow.Build
unHaskellNameTensorFlow.OpGen.ParsedOp
uniformCandidateSamplerTensorFlow.GenOps.Core
uniformCandidateSampler'TensorFlow.GenOps.Core
UniqueTensorFlow.Build
uniqueTensorFlow.GenOps.Core
unique'TensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
uniqueWithCounts'TensorFlow.GenOps.Core
unknownRankProto.Tensorflow.Core.Framework.TensorShape
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unpackTensorFlow.GenOps.Core
unpack'TensorFlow.GenOps.Core
unScalarTensorFlow.Types, TensorFlow.Core
unsortedSegmentSumTensorFlow.GenOps.Core
unsortedSegmentSum'TensorFlow.GenOps.Core
unstageTensorFlow.GenOps.Core
unstage'TensorFlow.GenOps.Core
unTensorDataTensorFlow.Types
unTFNameTensorFlow.OpGen.ParsedOp
usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file diff --git a/docs/haddock/doc-index-V.html b/docs/haddock/doc-index-V.html index 96eea50..6c813c5 100644 --- a/docs/haddock/doc-index-V.html +++ b/docs/haddock/doc-index-V.html @@ -1,4 +1,4 @@ (Index - V)

 

Index - V

ValueTensorFlow.Tensor, TensorFlow.Core
value 
1 (Function)TensorFlow.Tensor, TensorFlow.Core
2 (Function)Proto.Tensorflow.Core.Protobuf.Config
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
ValueKindTensorFlow.Tensor, TensorFlow.Core
varHandleOpTensorFlow.GenOps.Core
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
varIsInitializedOpTensorFlow.GenOps.Core
vectorTensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
versionNumberProto.Tensorflow.Core.Framework.Tensor
versionsProto.Tensorflow.Core.Framework.Graph
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file +

 

Index - V

Value 
1 (Data Constructor)TensorFlow.Tensor
2 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
value 
1 (Function)TensorFlow.Tensor, TensorFlow.Core
2 (Function)Proto.Tensorflow.Core.Protobuf.Config
3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
4 (Function)Proto.Tensorflow.Core.Framework.AttrValue
5 (Function)Proto.Tensorflow.Core.Framework.Summary
varHandleOpTensorFlow.GenOps.Core
varHandleOp'TensorFlow.GenOps.Core
variable 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
variable' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
variableV2TensorFlow.GenOps.Core
variableV2'TensorFlow.GenOps.Core
varIsInitializedOpTensorFlow.GenOps.Core
varIsInitializedOp'TensorFlow.GenOps.Core
vectorTensorFlow.Ops
vector'TensorFlow.Ops
version 
1 (Function)Proto.Tensorflow.Core.Framework.Graph
2 (Function)Proto.Tensorflow.Core.Framework.OpDef
versionNumberProto.Tensorflow.Core.Framework.Tensor
versionsProto.Tensorflow.Core.Framework.Graph
visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file diff --git a/docs/haddock/doc-index-W.html b/docs/haddock/doc-index-W.html index e76cc6e..b404456 100644 --- a/docs/haddock/doc-index-W.html +++ b/docs/haddock/doc-index-W.html @@ -1,4 +1,4 @@ (Index - W)

 

Index - W

where'TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
writeFileTensorFlow.GenOps.Core
wtsCkptTensorFlow.Examples.MNIST.TrainedGraph
\ No newline at end of file +

 

Index - W

wallTimeProto.Tensorflow.Core.Util.Event
where'TensorFlow.GenOps.Core
where''TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
wholeFileReader'TensorFlow.GenOps.Core
wholeFileReaderV2TensorFlow.GenOps.Core
wholeFileReaderV2'TensorFlow.GenOps.Core
widthProto.Tensorflow.Core.Framework.Summary
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withEventWriterTensorFlow.Logging
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
writeFileTensorFlow.GenOps.Core
writeFile'TensorFlow.GenOps.Core
wtsCkptTensorFlow.Examples.MNIST.TrainedGraph
\ No newline at end of file diff --git a/docs/haddock/doc-index-Z.html b/docs/haddock/doc-index-Z.html index 473d679..1626417 100644 --- a/docs/haddock/doc-index-Z.html +++ b/docs/haddock/doc-index-Z.html @@ -1,4 +1,4 @@ (Index - Z)

 

Index - Z

zeroInitializedVariableTensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
\ No newline at end of file +

 

Index - Z

zeroInitializedVariableTensorFlow.Ops
zeroInitializedVariable'TensorFlow.Ops
zerosTensorFlow.Ops
zerosLike 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zerosLike' 
1 (Function)TensorFlow.GenOps.Core
2 (Function)TensorFlow.Ops
zetaTensorFlow.GenOps.Core
zeta'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/doc-index.html b/docs/haddock/doc-index.html index 07ad04d..bfab859 100644 --- a/docs/haddock/doc-index.html +++ b/docs/haddock/doc-index.html @@ -1,4 +1,4 @@ (Index)

 

\ No newline at end of file +

 

\ No newline at end of file diff --git a/docs/haddock/index-frames.html b/docs/haddock/index-frames.html index 6c8264a..1058a34 100644 --- a/docs/haddock/index-frames.html +++ b/docs/haddock/index-frames.html @@ -1,4 +1,4 @@

Modules

\ No newline at end of file +

Modules

\ No newline at end of file diff --git a/docs/haddock/index.html b/docs/haddock/index.html index d2f74f5..02ab365 100644 --- a/docs/haddock/index.html +++ b/docs/haddock/index.html @@ -1,4 +1,4 @@

 

Modules

\ No newline at end of file +

 

Modules

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html index a3b1d44..04a5c62 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Build.html @@ -1,27 +1,14 @@ TensorFlow.Build

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Build

Contents

Synopsis

Graph node types

newtype ControlNode Source

A type of graph node which has no outputs. These nodes are - valuable for causing side effects when they are run.

Constructors

ControlNode 

Fields

unControlNode :: Op
 

Ops

The Build monad

render :: Tensor v a -> Build (Tensor v a) Source

Render a Tensor, fixing its name, scope, device and control inputs from - the Build context. Also renders any dependencies of the Tensor that - weren't already rendered.

This operation is idempotent; render >=> render === render. However, - rendering a (previously un-rendered) Tensor in two different contexts - may result in two different Tensors.

renderNodeName :: Tensor v a -> Build NodeName Source

Render a Tensor and get its node's name.

data BuildT m a Source

An action for building nodes in a TensorFlow graph. - Used to manage build state internally as part of the Session monad.

type Build = BuildT Identity Source

An action for building nodes in a TensorFlow graph.

addInitializer :: ControlNode -> Build () Source

Registers the given node to be executed before the next - run.

hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b Source

This is Control.Monad.Morph.hoist sans the dependency.

evalBuildT :: Monad m => BuildT m a -> m a Source

asGraphDef :: Build a -> GraphDef Source

Produce a GraphDef proto representation of the nodes that are rendered in - the given Build action.

flushInitializers :: Monad m => BuildT m [NodeName] Source

Get all the initializers that have accumulated so far, and clear - that buffer.

flushNodeBuffer :: Monad m => BuildT m [NodeDef] Source

Get all the NodeDefs that have accumulated so far, and clear that buffer.

Creating and looking up Ops

getOrAddOp :: Op -> Build NodeName Source

Render the given op if it hasn't been rendered already, and return its - name.

addNewOp :: OpDef -> Build NodeDef Source

Add a new node for a given OpDef. This is used for making "stateful" ops - which are not safe to dedup (e.g., "variable" and "assign").

renderOutput :: Output -> Build Text Source

Render an Output and return a string representation for the TensorFlow - foreign APIs.

Modifying all nodes in a Build action

colocateWith :: forall a v b. Tensor v b -> Build a -> Build a Source

Places all nodes rendered in the given Build action on the same - device as the given Tensor (see also withDevice). Make sure that - the action has side effects of rendering the desired tensors. A pure - return would not have the desired effect.

withStateLens :: MonadState s m => Lens' s a -> (a -> a) -> m b -> m b Source

Modify some part of the state, run an action, and restore the state - after that action is done.

withDevice :: Maybe Device -> Build a -> Build a Source

Set a device for all nodes rendered in the given Build action - (unless further overridden by another use of withDevice).

withNameScope :: Text -> Build a -> Build a Source

Prepend a scope to all nodes rendered in the given Build action.

withNodeDependencies :: Set NodeName -> Build a -> Build a Source

Add control inputs to all nodes rendered in the given Build action.

Internal Summary related bits.

addSummary :: SummaryTensor -> Build () Source

Records the given summary action in Build for retrieval with - collectAllSummaries. The summary op is required to produce a - Summary protocol buffer in string form. For safety, use the - pre-composed functions: Logging.scalarSummary and - Logging.histogramSummary.

type SummaryTensor = Tensor Value ByteString Source

Synonym for tensors that return a serialized Summary proto.

collectAllSummaries :: Monad m => BuildT m [SummaryTensor] Source

Retrieves the summary ops collected thus far. Typically this only - happens once, but if buildWithSummary is used - repeatedly, the values accumulate.

\ No newline at end of file +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Build

Contents

Synopsis

Graph node types

newtype ControlNode

A type of graph node which has no outputs. These nodes are + valuable for causing side effects when they are run.

Constructors

ControlNode 

data Unique

Instances

Ops

opAttr :: Attribute a => Text -> Lens' OpDef a

opInputs :: Lens' OpDef [Output]

The Build monad

data GraphState

Instances

Monad m => MonadState GraphState (BuildT m) 

data BuildT m a

An action for building nodes in a TensorFlow graph. + Used to manage build state internally as part of the Session monad.

Instances

MonadTrans BuildT 
TensorKind Build 
Monad m => MonadState GraphState (BuildT m) 
Monad m => Monad (BuildT m) 
Functor m => Functor (BuildT m) 
Monad m => Applicative (BuildT m) 
MonadIO m => MonadIO (BuildT m) 
MonadThrow m => MonadThrow (BuildT m) 
MonadMask m => MonadMask (BuildT m) 
MonadCatch m => MonadCatch (BuildT m) 
Monad m => MonadBuild (BuildT m) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 

type Build = BuildT Identity

An action for building nodes in a TensorFlow graph.

class Monad m => MonadBuild m where

Lift a Build action into a monad, including any explicit op renderings.

Methods

build :: Build a -> m a
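For illustration, a minimal sketch of lifting a Build action into the Session monad with build (assuming the tensorflow and tensorflow-ops packages; vector is from TensorFlow.Ops):

    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- 'build' lifts a pure graph-construction action (here, rendering a
    -- constant) into any MonadBuild monad, such as Session.
    rendered :: TF.Session (TF.Tensor TF.Value Float)
    rendered = TF.build (TF.render (TF.vector [1, 2, 3]))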

addInitializer :: MonadBuild m => ControlNode -> m ()

Registers the given node to be executed before the next + run.

hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b

This is Control.Monad.Morph.hoist sans the dependency.

evalBuildT :: Monad m => BuildT m a -> m a

runBuildT :: BuildT m a -> m (a, GraphState)

asGraphDef :: Build a -> GraphDef

Produce a GraphDef proto representation of the nodes that are rendered in + the given Build action.
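A sketch of typical usage (assuming vector from tensorflow-ops and the Show instance that proto-lens generates for GraphDef):

    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- Build a one-node graph and print its GraphDef proto, without
    -- ever creating a session.
    main :: IO ()
    main = print (TF.asGraphDef (TF.render (TF.vector [1, 2, 3 :: Float])))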

flushInitializers :: Monad m => BuildT m [NodeName]

Get all the initializers that have accumulated so far, and clear + that buffer.

flushNodeBuffer :: MonadBuild m => m [NodeDef]

Get all the NodeDefs that have accumulated so far, and clear that buffer.

Creating and looking up Ops

getOrAddOp :: OpDef -> Build NodeName

Render the given op if it hasn't been rendered already, and return its + name.

addNewOp :: OpDef -> Build NodeName

Add a new node for a given OpDef. This is used for making "stateful" ops + which are not safe to dedup (e.g., "variable" and "assign").

encodeOutput :: Output -> Text

Turn an Output into a string representation for the TensorFlow + foreign APIs.

Modifying all nodes in a Build action

withStateLens :: MonadBuild m => Lens' GraphState a -> (a -> a) -> m b -> m b

Modify some part of the state, run an action, and restore the state + after that action is done.

withDevice :: MonadBuild m => Maybe Device -> m a -> m a

Set a device for all nodes rendered in the given Build action + (unless further overridden by another use of withDevice).

withNameScope :: MonadBuild m => Text -> m a -> m a

Prepend a scope to all nodes rendered in the given Build action.
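A short sketch (assuming vector from tensorflow-ops; OverloadedStrings supplies the Text scope name):

    {-# LANGUAGE OverloadedStrings #-}
    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- The node rendered inside gets a name prefixed with "layer1/".
    scoped :: TF.Build (TF.Tensor TF.Value Float)
    scoped = TF.withNameScope "layer1" (TF.render (TF.vector [1, 2, 3]))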

withNodeDependencies :: MonadBuild m => Set NodeName -> m a -> m a

Add control inputs to all nodes rendered in the given Build action.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html index dcad805..674fe17 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-BuildOp.html @@ -1,6 +1,6 @@ TensorFlow.BuildOp

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.BuildOp

Synopsis

Documentation

class OpResult a Source

Class of types that can be used as op outputs.

Minimal complete definition

toResult

Instances

OpResult ControlNode Source 
OpResult a => OpResult [a] Source 
OpResult (ResourceHandle a) Source 
(OpResult a1, OpResult a2) => OpResult (a1, a2) Source 
OpResult (Tensor Ref a) Source 
OpResult (Tensor Value a) Source 
(OpResult a1, OpResult a2, OpResult a3) => OpResult (a1, a2, a3) Source 
(OpResult a1, OpResult a2, OpResult a3, OpResult a4) => OpResult (a1, a2, a3, a4) Source 
(OpResult a1, OpResult a2, OpResult a3, OpResult a4, OpResult a5) => OpResult (a1, a2, a3, a4, a5) Source 
(OpResult a1, OpResult a2, OpResult a3, OpResult a4, OpResult a5, OpResult a6) => OpResult (a1, a2, a3, a4, a5, a6) Source 

class BuildOp f Source

Class of types that can be used as op functions.

Minimal complete definition

buildOp'

Instances

BuildOp ControlNode Source 
BuildOp [Tensor Value a] Source 
BuildOp (ResourceHandle a) Source 
OpResult a => BuildOp (Build a) Source 
BuildOp f => BuildOp ([Tensor v a] -> f) Source 
BuildOp f => BuildOp (ResourceHandle a -> f) Source 
BuildOp f => BuildOp (Tensor v a -> f) Source 
(OpResult t1, OpResult t2) => BuildOp (t1, t2) Source 
BuildOp (Tensor Ref a) Source 
BuildOp (Tensor Value a) Source 
(OpResult t1, OpResult t2, OpResult t3) => BuildOp (t1, t2, t3) Source 
(OpResult t1, OpResult t2, OpResult t3, OpResult t4) => BuildOp (t1, t2, t3, t4) Source 
(OpResult t1, OpResult t2, OpResult t3, OpResult t4, OpResult t5) => BuildOp (t1, t2, t3, t4, t5) Source 
(OpResult t1, OpResult t2, OpResult t3, OpResult t4, OpResult t5, OpResult t6) => BuildOp (t1, t2, t3, t4, t5, t6) Source 

buildOp :: BuildOp f => OpDef -> f Source

Starts an operation that returns a structured set of tensors - (singletons or tuples).

buildListOp Source

Arguments

:: BuildOp f 
=> [Int64]

Cardinality of the corresponding list of tensors output.

-> OpDef 
-> f 

Starts an operation that returns a list of tensors.

eqLengthGuard :: [(String, [(String, Int)])] -> Bool Source

Returns true if all the integers in each tuple are identical. - Throws an error with a descriptive message if not.

\ No newline at end of file +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.BuildOp

Synopsis

Documentation

class BuildResult a where

Class of types that can be used as op outputs.

Methods

buildResult :: Result a

Instances

BuildResult ResourceHandle 
BuildResult ControlNode 
BuildResult a => BuildResult [a] 
(BuildResult a1, BuildResult a2) => BuildResult (a1, a2) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
Rendered v => BuildResult (Tensor v a) 
(BuildResult a1, BuildResult a2, BuildResult a3) => BuildResult (a1, a2, a3) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4) => BuildResult (a1, a2, a3, a4) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5) => BuildResult (a1, a2, a3, a4, a5) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6) => BuildResult (a1, a2, a3, a4, a5, a6) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6, BuildResult a7) => BuildResult (a1, a2, a3, a4, a5, a6, a7) 
(BuildResult a1, BuildResult a2, BuildResult a3, BuildResult a4, BuildResult a5, BuildResult a6, BuildResult a7, BuildResult a8) => BuildResult (a1, a2, a3, a4, a5, a6, a7, a8) 

buildOp :: BuildResult a => [Int64] -> OpDef -> Build a

class PureResult a where

Class of types that can be used as op outputs.

Methods

pureResult :: ReaderT (Build OpDef) (State ResultState) a

Instances

PureResult a => PureResult [a] 
(PureResult a1, PureResult a2) => PureResult (a1, a2) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 
(PureResult a1, PureResult a2, PureResult a3) => PureResult (a1, a2, a3) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4) => PureResult (a1, a2, a3, a4) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5) => PureResult (a1, a2, a3, a4, a5) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6) => PureResult (a1, a2, a3, a4, a5, a6) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6, PureResult a7) => PureResult (a1, a2, a3, a4, a5, a6, a7) 
(PureResult a1, PureResult a2, PureResult a3, PureResult a4, PureResult a5, PureResult a6, PureResult a7, PureResult a8) => PureResult (a1, a2, a3, a4, a5, a6, a7, a8) 

pureOp :: PureResult a => [Int64] -> Build OpDef -> a

eqLengthGuard :: [(String, [(String, Int)])] -> Bool

Returns true if all the integers in each tuple are identical. + Throws an error with a descriptive message if not.

type OpParams = OpDef -> OpDef

Parameters to build an op (for example, the node name or optional attributes). + TODO: be more type safe.
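Because OpParams is only a function on OpDef, parameters compose with ordinary function composition. A sketch (the "seed"/"seed2" attribute names are illustrative, not tied to a specific op; opAttr is from TensorFlow.Build, and the setter comes from the lens-family package these bindings use):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Int (Int64)
    import Lens.Family2 ((.~))
    import TensorFlow.Build (opAttr)
    import TensorFlow.BuildOp (OpParams)

    -- Set two Int64 attributes on whichever OpDef this is applied to.
    setSeeds :: OpParams
    setSeeds = (opAttr "seed" .~ (1 :: Int64))
             . (opAttr "seed2" .~ (2 :: Int64))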

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html index bfd5f5b..dbf1957 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-ControlFlow.html @@ -1,9 +1,6 @@ TensorFlow.ControlFlow

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.ControlFlow

Contents

Synopsis

Dependencies

withControlDependencies :: Nodes t => t -> Build a -> Build a Source

Modify a Build action, such that all new ops rendered in it will depend - on the nodes in the first argument.

group :: Nodes t => t -> Build ControlNode Source

Create an op that groups multiple operations.

When this op finishes, all ops in the input n have finished. This op has - no output.

Operations

identity :: TensorType a => Tensor v a -> Tensor v a Source

Returns a Tensor with the same shape and contents as the input.

noOp :: ControlNode Source

Does nothing. Only useful as a placeholder for control edges.

named :: TensorType a => Text -> Tensor v a -> Tensor v a Source

Returns a Tensor with a given name and the same shape and contents as - the input.

TODO(judahjacobson): This breaks when used with uninitialized Tensor Refs, - since RefIdentity doesn't have SetAllowsUninitializedInput(). Look into - whether we can change that op.

\ No newline at end of file +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.ControlFlow

Contents

Synopsis

Dependencies

withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a

Modify a Build action, such that all new ops rendered in it will depend + on the nodes in the first argument.

group :: (MonadBuild m, Nodes t) => t -> m ControlNode

Create an op that groups multiple operations.

When this op finishes, all ops in the input n have finished. This op has + no output.
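A minimal sketch of grouping two rendered tensors into one ControlNode (vector is assumed from tensorflow-ops):

    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- The resulting ControlNode finishes only after both inputs have run.
    both :: TF.Session TF.ControlNode
    both = do
        a <- TF.render (TF.vector [1 :: Float])
        b <- TF.render (TF.vector [2 :: Float])
        TF.group (a, b)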

Operations

noOp :: MonadBuild m => m ControlNode

Does nothing. Only useful as a placeholder for control edges.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html index ad255f9..a3079ba 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Core.html @@ -1,61 +1,49 @@ TensorFlow.Core

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Core

Contents

Description

The core functionality of TensorFlow.

Unless you are defining ops, you do not need to import other modules from +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Core

Description

The core functionality of TensorFlow.

Unless you are defining ops, you do not need to import other modules from this package.

Basic ops are provided in the tensorflow-ops and tensorflow-core-ops - packages.

Synopsis

Session

data Options Source

Customization for session. Use the lenses to update: - sessionTarget, sessionTracer, sessionConfig.

sessionConfig :: Lens' Options ConfigProto Source

Uses the specified config for the created session.

sessionTarget :: Lens' Options ByteString Source

Target can be: "local", ip:port, host:port. - The set of supported factories depends on the linked-in libraries.

sessionTracer :: Lens' Options Tracer Source

Uses the given logger to monitor session progress.

runSession :: Session a -> IO a Source

Run Session actions in a new TensorFlow session.

runSessionWithOptions :: Options -> Session a -> IO a Source

Run Session actions in a new TensorFlow session created with - the given option setter actions (sessionTarget, sessionConfig).

Building graphs

build :: Build a -> Session a Source

Lift a Build action into a Session, including any explicit op - renderings.

buildAnd :: (a -> Session b) -> Build a -> Session b Source

Helper combinator for doing something with the result of a Build action. - Example usage:

buildAnd run :: Fetchable t a => Build t -> Session a

buildWithSummary :: forall a. Build a -> Session (a, [SummaryTensor]) Source

Lift a Build action into a Session, including any explicit op - renderings. Returns the merged summary ops which can be used for - logging, see build for a convenient wrapper.

Running graphs

class Nodes t => Fetchable t a Source

Types that tensor representations (e.g. Tensor, ControlNode) can be - fetched into.

Includes collections of tensors (e.g. tuples).

Minimal complete definition

getFetch

Instances

(~) * a () => Fetchable ControlNode a Source 
Fetchable t a => Fetchable [t] [a] Source 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (Scalar a') Source 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (Vector a') Source 
(Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) Source 
(Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) Source 

newtype Scalar a Source

Constructors

Scalar 

Fields

unScalar :: a
 

Instances

class Nodes t Source

Types that contain ops which can be run.

Minimal complete definition

getNodes

Instances

Nodes ControlNode Source 
Nodes t => Nodes [t] Source 
(Nodes t1, Nodes t2) => Nodes (t1, t2) Source 
Nodes (Tensor v a) Source 
(Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) Source 

run :: Fetchable t a => t -> Session a Source

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, and fetch the corresponding values for a.

run_ :: Nodes t => t -> Session () Source

Run a subgraph t, rendering and extending any dependent nodes that aren't + packages.

Synopsis

Session

data Session a

data Options

Customization for session. Use the lenses to update: + sessionTarget, sessionTracer, sessionConfig.

Instances

Default Options 

sessionConfig :: Lens' Options ConfigProto

Uses the specified config for the created session.

sessionTarget :: Lens' Options ByteString

Target can be: "local", ip:port, host:port. + The set of supported factories depends on the linked-in libraries.

sessionTracer :: Lens' Options Tracer

Uses the given logger to monitor session progress.

runSession :: Session a -> IO a

Run Session actions in a new TensorFlow session.

runSessionWithOptions :: Options -> Session a -> IO a

Run Session actions in a new TensorFlow session created with + the given option setter actions (sessionTarget, sessionConfig).
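A sketch of overriding the defaults (def is from the data-default package and the lens setter from lens-family, both of which these bindings build on):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Default (def)
    import Lens.Family2 ((.~))
    import qualified TensorFlow.Core as TF

    -- Run against the in-process "local" target with default config.
    main :: IO ()
    main = TF.runSessionWithOptions
               ((TF.sessionTarget .~ "local") def)
               (return ())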

Building graphs

class Monad m => MonadBuild m where

Lift a Build action into a monad, including any explicit op renderings.

Methods

build :: Build a -> m a

Running graphs

class Nodes t => Fetchable t a

Types that tensor representations (e.g. Tensor, ControlNode) can be + fetched into.

Includes collections of tensors (e.g. tuples).

Minimal complete definition

getFetch

Instances

(~) * a () => Fetchable ControlNode a 
Fetchable t a => Fetchable [t] [a] 
(~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 
(Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) 
(Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) 
(Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) 

class Nodes t

Types that contain ops which can be run.

Minimal complete definition

getNodes

Instances

Nodes ControlNode 
Nodes t => Nodes [t] 
(Nodes t1, Nodes t2) => Nodes (t1, t2) 
(Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) 
Nodes (ListOf f ([] *)) 
Nodes (Tensor v a) 
(Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) 

run :: Fetchable t a => t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already + rendered, and fetch the corresponding values for a.

run_ :: Nodes t => t -> Session ()

Run a subgraph t, rendering and extending any dependent nodes that aren't already rendered. This behaves like run except that it doesn't do any - fetches.

data Feed Source

A pair of a Tensor and some data that should be fed into that Tensor - when running the graph.

feed :: Tensor v a -> TensorData a -> Feed Source

Create a Feed for feeding the given data into a Tensor when running + fetches.

data Feed

A pair of a Tensor and some data that should be fed into that Tensor + when running the graph.

feed :: Rendered v => Tensor v a -> TensorData a -> Feed

Create a Feed for feeding the given data into a Tensor when running the graph.

Note that if a Tensor is rendered, its identity may change; so feeding the - rendered Tensor may be different than feeding the original Tensor.

runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a Source

Run a subgraph t, rendering any dependent nodes that aren't already + rendered Tensor may be different than feeding the original Tensor.

runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already rendered, feed the given input values, and fetch the corresponding result - values for a.

runWithFeeds_ :: Nodes t => [Feed] -> t -> Session () Source

Run a subgraph t, rendering any dependent nodes that aren't already + values for a.

runWithFeeds_ :: Nodes t => [Feed] -> t -> Session ()

Run a subgraph t, rendering any dependent nodes that aren't already rendered, feed the given input values, and fetch the corresponding result values for a. This behaves like runWithFeeds except that it doesn't do - any fetches.

Async

asyncProdNodes Source

Arguments

:: Nodes t 
=> t

Node to evaluate concurrently.

-> Session () 

Starts a concurrent thread which evaluates the given Nodes + any fetches.

Async

asyncProdNodes

Arguments

:: Nodes t 
=> t

Node to evaluate concurrently.

-> Session () 

Starts a concurrent thread which evaluates the given Nodes forever until runSession exits or an exception occurs. Graph extension happens synchronously, but the resultant run proceeds as - a separate thread.

Build

type Build = BuildT Identity Source

An action for building nodes in a TensorFlow graph.

data BuildT m a Source

An action for building nodes in a TensorFlow graph. - Used to manage build state internally as part of the Session monad.

render :: Tensor v a -> Build (Tensor v a) Source

Render a Tensor, fixing its name, scope, device and control inputs from - the Build context. Also renders any dependencies of the Tensor that - weren't already rendered.

This operation is idempotent; render >=> render === render. However, - rendering a (previously un-rendered) Tensor in two different contexts - may result in two different Tensors.

asGraphDef :: Build a -> GraphDef Source

Produce a GraphDef proto representation of the nodes that are rendered in - the given Build action.

Tensor

data ControlNode Source

A type of graph node which has no outputs. These nodes are - valuable for causing side effects when they are run.

data Tensor v a Source

A named output of a TensorFlow operation.

The type parameter a is the type of the elements in the Tensor. The - parameter v is either Value or Ref, depending on whether the graph is - treating this op output as an immutable Value or a stateful Ref (e.g., a - variable). Note that a Tensor Ref can be cast into a Tensor Value via - value.

Instances

data TensorKind v where Source

This class provides a runtime switch on whether a Tensor should be - treated as a Value or as a Ref.

tensorAttr :: Attribute attr => Text -> Traversal' (Tensor v a) attr Source

Lens for the attributes of a tensor.

Only valid if the tensor has not yet been rendered. If the tensor has been - rendered, the traversal will be over nothing (nothing can be read or - written).

value :: Tensor v a -> Tensor Value a Source

Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a - Ref into Value. This behaves like a no-op.

tensorFromName :: TensorKind v -> Text -> Tensor v a Source

Create a Tensor for a given name. This can be used to reference nodes - in a GraphDef that was loaded via addGraphDef. - TODO(judahjacobson): add more safety checks here.

Element types

data TensorData a Source

Data about a tensor that is encoded for the TensorFlow APIs.

class TensorType a where Source

The class of scalar types supported by tensorflow.

Methods

decodeTensorData :: TensorData a -> Vector a Source

Decode the bytes of a TensorData into a Vector.

encodeTensorData :: Shape -> Vector a -> TensorData a Source

Encode a Vector into a TensorData.

The values should be in row major order, e.g.,

element 0: index (0, ..., 0) + a separate thread.

Build

type Build = BuildT Identity

An action for building nodes in a TensorFlow graph.

data BuildT m a

An action for building nodes in a TensorFlow graph. + Used to manage build state internally as part of the Session monad.

Instances

MonadTrans BuildT 
TensorKind Build 
Monad m => MonadState GraphState (BuildT m) 
Monad m => Monad (BuildT m) 
Functor m => Functor (BuildT m) 
Monad m => Applicative (BuildT m) 
MonadIO m => MonadIO (BuildT m) 
MonadThrow m => MonadThrow (BuildT m) 
MonadMask m => MonadMask (BuildT m) 
MonadCatch m => MonadCatch (BuildT m) 
Monad m => MonadBuild (BuildT m) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 

render :: MonadBuild m => Tensor Build a -> m (Tensor Value a)

Render a Tensor, fixing its name, scope, device and control inputs from + the MonadBuild context. Also renders any dependencies of the Tensor that + weren't already rendered.

This operation is idempotent; calling render on the same input in the same + context will produce the same result. However, rendering the same + Tensor Build in two different contexts may result in two different + Tensor Values.
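A sketch of the sharing this buys (expr and render are from this package; add and vector are assumed from tensorflow-ops):

    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- Render once, then refer to the same graph node twice via expr;
    -- an unrendered Tensor Build used twice could be rendered twice.
    shared :: TF.Build (TF.Tensor TF.Build Float)
    shared = do
        x <- TF.render (TF.vector [1, 2, 3])
        return (TF.expr x `TF.add` TF.expr x)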

asGraphDef :: Build a -> GraphDef

Produce a GraphDef proto representation of the nodes that are rendered in + the given Build action.

opAttr :: Attribute a => Text -> Lens' OpDef a

Tensor

data ControlNode

A type of graph node which has no outputs. These nodes are + valuable for causing side effects when they are run.

data Tensor v a

A named output of a TensorFlow operation.

The type parameter a is the type of the elements in the Tensor. The + parameter v is either:

  • Build: An unrendered, immutable value.
  • Value: A rendered, immutable value.
  • Ref: A rendered stateful handle (e.g., a variable).

Note that expr, value, render and renderValue can help convert between + the different types of Tensor.

Instances

BuildInputs (ListOf (Tensor v) as) 
BuildInputs (Tensor v a) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
Rendered v => BuildResult (Tensor v a) 
Nodes (Tensor v a) 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

value :: Tensor Ref a -> Tensor Value a

Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.

tensorFromName :: TensorKind v => Text -> Tensor v a

Create a Tensor for a given name. This can be used to reference nodes + in a GraphDef that was loaded via addGraphDef. + TODO(judahjacobson): add more safety checks here.

expr :: TensorKind v => Tensor v a -> Tensor Build a

Element types

data TensorData a

Tensor data with the correct memory layout for tensorflow.

Instances

(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

class TensorType a => TensorDataType s a where

Types that can be converted to and from TensorData.

Vector is the most efficient to encode/decode for most element types.

Methods

decodeTensorData :: TensorData a -> s a

Decode the bytes of a TensorData into an s.

encodeTensorData :: Shape -> s a -> TensorData a

Encode an s into a TensorData.

The values should be in row major order, e.g.,

element 0: index (0, ..., 0) element 1: index (0, ..., 1) - ...
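For instance, a sketch of encoding a 2x2 matrix (Data.Vector is from the vector package):

    import qualified Data.Vector as V
    import qualified TensorFlow.Core as TF

    -- Row-major encoding of the matrix [[1, 2], [3, 4]].
    matrix :: TF.TensorData Float
    matrix = TF.encodeTensorData (TF.Shape [2, 2]) (V.fromList [1, 2, 3, 4])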

newtype Shape Source

Shape (dimensions) of a tensor.

Constructors

Shape [Int64] 

type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a) Source

A Constraint specifying the possible choices of a TensorType.

We implement a Constraint like OneOf '[Double, Float] a by turning the + ...

newtype Scalar a

Constructors

Scalar 

Fields

unScalar :: a
 

Instances

TensorDataType Vector a => TensorDataType Scalar a 
Eq a => Eq (Scalar a) 
Floating a => Floating (Scalar a) 
Fractional a => Fractional (Scalar a) 
Num a => Num (Scalar a) 
Ord a => Ord (Scalar a) 
Real a => Real (Scalar a) 
RealFloat a => RealFloat (Scalar a) 
RealFrac a => RealFrac (Scalar a) 
Show a => Show (Scalar a) 
IsString a => IsString (Scalar a) 

newtype Shape

Shape (dimensions) of a tensor.

Constructors

Shape [Int64] 

type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)

A Constraint specifying the possible choices of a TensorType.

We implement a Constraint like OneOf '[Double, Float] a by turning the natural representation as a conjunction, i.e.,

   a == Double || a == Float
 

into a disjunction like

    a /= Int32 && a /= Int64 && a /= ByteString && ...
-

using an enumeration of all the possible TensorTypes.

type family a /= b :: Constraint Source

A constraint checking that two types are different.

Equations

a /= a = TypeError a ~ ExcludedCase 
a /= b = () 

Op combinators

colocateWith :: forall a v b. Tensor v b -> Build a -> Build a Source

Places all nodes rendered in the given Build action on the same +

using an enumeration of all the possible TensorTypes.
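A sketch of how OneOf appears in user code (double is a hypothetical helper; add is assumed from tensorflow-ops, and DataKinds enables the type-level list):

    {-# LANGUAGE DataKinds #-}
    import Data.Int (Int32)
    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- Accepts Float, Double or Int32 tensors, and rejects all others.
    double :: TF.OneOf '[Float, Double, Int32] t
           => TF.Tensor TF.Build t -> TF.Tensor TF.Build t
    double x = x `TF.add` x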

type family a /= b :: Constraint

A constraint checking that two types are different.

Equations

a /= a = TypeError a ~ ExcludedCase 
a /= b = () 

Op combinators

colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a

Places all nodes rendered in the given Build action on the same device as the given Tensor (see also withDevice). Make sure that the action has side effects of rendering the desired tensors. A pure - return would not have the desired effect.

newtype Device Source

A device that a node can be assigned to. + return would not have the desired effect.

newtype Device

A device that a node can be assigned to. There's a naming convention where the device names - are constructed from job and replica names.

Constructors

Device 

Fields

deviceName :: Text
 

withDevice :: Maybe Device -> Build a -> Build a Source

Set a device for all nodes rendered in the given Build action - (unless further overridden by another use of withDevice).

withNameScope :: Text -> Build a -> Build a Source

Prepend a scope to all nodes rendered in the given Build action.

named :: TensorType a => Text -> Tensor v a -> Tensor v a Source

Returns a Tensor with a given name and the same shape and contents as - the input.

TODO(judahjacobson): This breaks when used with uninitialized Tensor Refs, - since RefIdentity doesn't have SetAllowsUninitializedInput(). Look into - whether we can change that op.

Dependencies

withControlDependencies :: Nodes t => t -> Build a -> Build a Source

Modify a Build action, such that all new ops rendered in it will depend - on the nodes in the first argument.

group :: Nodes t => t -> Build ControlNode Source

Create an op that groups multiple operations.

When this op finishes, all ops in the input n have finished. This op has - no output.

Misc

identity :: TensorType a => Tensor v a -> Tensor v a Source

Returns a Tensor with the same shape and contents as the input.

noOp :: ControlNode Source

Does nothing. Only useful as a placeholder for control edges.

\ No newline at end of file + are constructed from job and replica names.

Constructors

Device 

Fields

deviceName :: Text
 

withDevice :: MonadBuild m => Maybe Device -> m a -> m a

Set a device for all nodes rendered in the given Build action + (unless further overridden by another use of withDevice).

withNameScope :: MonadBuild m => Text -> m a -> m a

Prepend a scope to all nodes rendered in the given Build action.

Dependencies

withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a

Modify a Build action, such that all new ops rendered in it will depend + on the nodes in the first argument.

group :: (MonadBuild m, Nodes t) => t -> m ControlNode

Create an op that groups multiple operations.

When this op finishes, all ops in the input n have finished. This op has + no output.

Misc

noOp :: MonadBuild m => m ControlNode

Does nothing. Only useful as a placeholder for control edges.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html index 8f7e56d..bc163c6 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-FFI.html @@ -1,8 +1,8 @@ TensorFlow.Internal.FFI

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Internal.FFI

Contents

Synopsis

Documentation

withSession Source

Arguments

:: (SessionOptions -> IO ()) 
-> ((IO () -> IO ()) -> Session -> IO a)

The action can spawn concurrent tasks which will +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Internal.FFI

Synopsis

Documentation

data Session

withSession

Arguments

:: (SessionOptions -> IO ()) 
-> ((IO () -> IO ()) -> Session -> IO a)

The action can spawn concurrent tasks which will be canceled before withSession returns.

-> IO a 

Runs the given action after creating a session with options - populated by the given optionSetter.

run Source

Arguments

:: Session 
-> [(ByteString, TensorData)]

Feeds.

-> [ByteString]

Fetches.

-> [ByteString]

Targets.

-> IO [TensorData] 

data TensorData Source

All of the data needed to represent a tensor.

setSessionConfig :: ConfigProto -> SessionOptions -> IO () Source

setSessionTarget :: ByteString -> SessionOptions -> IO () Source

getAllOpList :: IO ByteString Source

Returns the serialized OpList of all OpDefs defined in this - address space.

Internal helper.

useProtoAsVoidPtrLen :: (Message msg, Integral c, Show c, Bits c) => msg -> (Ptr b -> c -> IO a) -> IO a Source

Serializes the given msg and provides it as (ptr,len) argument + populated by the given optionSetter.

run

Arguments

:: Session 
-> [(ByteString, TensorData)]

Feeds.

-> [ByteString]

Fetches.

-> [ByteString]

Targets.

-> IO [TensorData] 

data TensorData

All of the data needed to represent a tensor.

setSessionConfig :: ConfigProto -> SessionOptions -> IO ()

setSessionTarget :: ByteString -> SessionOptions -> IO ()

getAllOpList :: IO ByteString

Returns the serialized OpList of all OpDefs defined in this + address space.

Internal helper.

useProtoAsVoidPtrLen :: (Message msg, Integral c, Show c, Bits c) => msg -> (Ptr b -> c -> IO a) -> IO a

Serializes the given msg and provides it as (ptr,len) argument to the given action.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html index 14c3dcc..1f4db93 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Internal-VarInt.html @@ -1,4 +1,4 @@ TensorFlow.Internal.VarInt

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellSafe
LanguageHaskell2010

TensorFlow.Internal.VarInt

Description

Originally taken from internal proto-lens code.

Documentation

getVarInt :: Parser Word64 Source

Decode an unsigned varint.

putVarInt :: Word64 -> Builder Source

Encode a Word64.

\ No newline at end of file +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellSafe
LanguageHaskell2010

TensorFlow.Internal.VarInt

Description

Originally taken from internal proto-lens code.

Synopsis

Documentation

getVarInt :: Parser Word64

Decode an unsigned varint.

putVarInt :: Word64 -> Builder

Encode a Word64.
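A round-trip sketch (assuming the Parser is attoparsec's and the Builder is bytestring's, matching the module's internals):

    import Data.Attoparsec.ByteString (parseOnly)
    import Data.ByteString.Builder (toLazyByteString)
    import qualified Data.ByteString.Lazy as BL
    import Data.Word (Word64)
    import TensorFlow.Internal.VarInt (getVarInt, putVarInt)

    -- Encode a Word64 to its varint bytes and parse it back.
    roundTrip :: Word64 -> Either String Word64
    roundTrip x =
        parseOnly getVarInt (BL.toStrict (toLazyByteString (putVarInt x)))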

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html index 5971080..5b12a0e 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Nodes.html @@ -1,6 +1,6 @@ TensorFlow.Nodes

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Nodes

Synopsis

Documentation

class Nodes t where Source

Types that contain ops which can be run.

Methods

getNodes :: t -> Build (Set NodeName) Source

Instances

Nodes ControlNode Source 
Nodes t => Nodes [t] Source 
(Nodes t1, Nodes t2) => Nodes (t1, t2) Source 
Nodes (Tensor v a) Source 
(Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) Source 

class Nodes t => Fetchable t a where Source

Types that tensor representations (e.g. Tensor, ControlNode) can be - fetched into.

Includes collections of tensors (e.g. tuples).

Methods

getFetch :: t -> Build (Fetch a) Source

Instances

(~) * a () => Fetchable ControlNode a Source 
Fetchable t a => Fetchable [t] [a] Source 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (Scalar a') Source 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (Vector a') Source 
(Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) Source 
(Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) Source 

data Fetch a Source

Fetch action. Keeps track of what needs to be fetched and how to decode - the fetched data.

Constructors

Fetch 

Fields

fetches :: Set Text

Nodes to fetch

fetchRestore :: Map Text TensorData -> a

Function to create an a from the fetched data.

nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b Source

fetchTensorVector :: forall a v. TensorType a => Tensor v a -> Build (Fetch (Shape, Vector a)) Source

newtype Scalar a Source

Constructors

Scalar 

Fields

unScalar :: a
 

Instances

\ No newline at end of file +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Nodes

Synopsis

Documentation

class Nodes t where

Types that contain ops which can be run.

Methods

getNodes :: t -> Build (Set NodeName)

Instances

Nodes ControlNode 
Nodes t => Nodes [t] 
(Nodes t1, Nodes t2) => Nodes (t1, t2) 
(Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) 
Nodes (ListOf f ([] *)) 
Nodes (Tensor v a) 
(Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) 

class Nodes t => Fetchable t a where

Types that tensor representations (e.g. Tensor, ControlNode) can be + fetched into.

Includes collections of tensors (e.g. tuples).

Methods

getFetch :: t -> Build (Fetch a)

Instances

(~) * a () => Fetchable ControlNode a 
Fetchable t a => Fetchable [t] [a] 
(~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 
(Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) 
(Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) 
(Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3) => Fetchable (t1, t2, t3) (a1, a2, a3) 

data Fetch a

Fetch action. Keeps track of what needs to be fetched and how to decode + the fetched data.

Constructors

Fetch 

Fields

fetches :: Set Text

Nodes to fetch

fetchRestore :: Map Text TensorData -> a

Function to create an a from the fetched data.

nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b

fetchTensorVector :: forall a v. TensorType a => Tensor v a -> Build (Fetch (TensorData a))

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html index bf26950..166ef5c 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Output.html @@ -1,16 +1,13 @@ TensorFlow.Output

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Output

Contents

Documentation

newtype ControlNode Source

A type of graph node which has no outputs. These nodes are - valuable for causing side effects when they are run.

Constructors

ControlNode 

Fields

unControlNode :: Op
 

newtype Device Source

A device that a node can be assigned to. +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Output

Contents

Documentation

newtype ControlNode

A type of graph node which has no outputs. These nodes are + valuable for causing side effects when they are run.

Constructors

ControlNode 

newtype Device

A device that a node can be assigned to. There's a naming convention where the device names - are constructed from job and replica names.

Constructors

Device 

Fields

deviceName :: Text
 

Ops

newtype NodeName Source

The name of a node in the graph. This corresponds to the proto field + are constructed from job and replica names.

Constructors

Device 

Fields

deviceName :: Text
 

Ops

newtype NodeName

The name of a node in the graph. This corresponds to the proto field NodeDef.name. Includes the scope prefix (if any) and a unique identifier - (if the node was implicitly named).

Constructors

NodeName 

Fields

unNodeName :: Text
 

data Op Source

The representation of a node in a TensorFlow graph.

Constructors

Rendered !NodeDef

Properties are fixed, including the - device, name, and scope.

Unrendered !OpDef

Properties are not fixed, and may change depending - on which context this op is rendered in.

opUnrendered :: Traversal' Op OpDef Source

Traverse on the Unrendered of an Op.

Same implementation as _Left.

data OpDef Source

Op definition. This corresponds somewhat to the NodeDef proto.

newtype OpType Source

The type of op of a node in the graph. This corresponds to the proto field - NodeDef.op.

Constructors

OpType 

Fields

unOpType :: Text
 

data Output Source

An output of a TensorFlow node.

Constructors

Output !OutputIx !Op 

data PendingNodeName Source

The name specified for an unrendered Op. If an Op has an + (if the node was implicitly named).

Constructors

NodeName 

Fields

unNodeName :: Text
 

data OpDef

Op definition. This corresponds somewhat to the NodeDef proto.

Instances

opAttr :: Attribute a => Text -> Lens' OpDef a

opInputs :: Lens' OpDef [Output]

newtype OpType

The type of op of a node in the graph. This corresponds to the proto field + NodeDef.op.

Constructors

OpType 

Fields

unOpType :: Text
 

newtype OutputIx

Constructors

OutputIx 

Fields

unOutputIx :: Int
 

data Output

An output of a TensorFlow node.

Constructors

Output 

data PendingNodeName

The name specified for an unrendered Op. If an Op has an ImplicitName, it will be assigned based on the opType plus a - unique identifier. Does not contain the "scope" prefix.

newtype ResourceHandle a Source

Opaque handle to a mutable resource in the graph. Typical such - resources are variables. The type parameter corresponds to the - dtype of the tensor held in the variable.

Constructors

ResourceHandle Output 
\ No newline at end of file + unique identifier. Does not contain the "scope" prefix.

Constructors

ExplicitName !Text 
ImplicitName 

newtype ResourceHandle

Opaque handle to a mutable resource in the graph. Typical such + resources are variables.

Constructors

ResourceHandle Output 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html index a93b363..5b40269 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Session.html @@ -1,23 +1,19 @@ TensorFlow.Session

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Session

Documentation

data Options Source

Customization for session. Use the lenses to update: - sessionTarget, sessionTracer, sessionConfig.

sessionConfig :: Lens' Options ConfigProto Source

Uses the specified config for the created session.

sessionTarget :: Lens' Options ByteString Source

Target can be: "local", ip:port, host:port. - The set of supported factories depends on the linked-in libraries.

sessionTracer :: Lens' Options Tracer Source

Uses the given logger to monitor session progress.

runSession :: Session a -> IO a Source

Run Session actions in a new TensorFlow session.

runSessionWithOptions :: Options -> Session a -> IO a Source

Run Session actions in a new TensorFlow session created with - the given option setter actions (sessionTarget, sessionConfig).

build :: Build a -> Session a Source

Lift a Build action into a Session, including any explicit op - renderings.

buildAnd :: (a -> Session b) -> Build a -> Session b Source

Helper combinator for doing something with the result of a Build action. - Example usage:

buildAnd run :: Fetchable t a => Build t -> Session a

buildWithSummary :: forall a. Build a -> Session (a, [SummaryTensor]) Source

Lift a Build action into a Session, including any explicit op - renderings. Returns the merged summary ops which can be used for - logging, see build for a convenient wrapper.

extend :: Session () Source

Adds all pending rendered nodes to the TensorFlow graph and runs - any pending initializers.

Note that run, runWithFeeds, etc. will all call this function implicitly.

run :: Fetchable t a => t -> Session a Source

Run a subgraph t, rendering any dependent nodes that aren't already - rendered, and fetch the corresponding values for a.

runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a Source

Run a subgraph t, rendering any dependent nodes that aren't already +

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Session

Synopsis

Documentation

data Session a

data Options

Customization for session. Use the lenses to update: + sessionTarget, sessionTracer, sessionConfig.

Instances

Default Options 

sessionConfig :: Lens' Options ConfigProto

Uses the specified config for the created session.

sessionTarget :: Lens' Options ByteString

Target can be: "local", ip:port, host:port. + The set of supported factories depends on the linked-in libraries.

sessionTracer :: Lens' Options Tracer

Uses the given logger to monitor session progress.

runSession :: Session a -> IO a

Run Session actions in a new TensorFlow session.

runSessionWithOptions :: Options -> Session a -> IO a

Run Session actions in a new TensorFlow session created with + the given option setter actions (sessionTarget, sessionConfig).

class Monad m => MonadBuild m where

Lift a Build action into a monad, including any explicit op renderings.

Methods

build :: Build a -> m a

extend :: Session ()

Adds all pending rendered nodes to the TensorFlow graph and runs + any pending initializers.

Note that run, runWithFeeds, etc. will all call this function implicitly.

run :: Fetchable t a => t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already + rendered, and fetch the corresponding values for a.
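The canonical first program, as a sketch (vector is from tensorflow-ops, Data.Vector from the vector package):

    import qualified Data.Vector as V
    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- Build a constant, run it, and fetch the result as a Data.Vector.
    main :: IO ()
    main = do
        result <- TF.runSession (TF.run (TF.vector [1, 2, 3 :: Float]))
        print (result :: V.Vector Float)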

runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a

Run a subgraph t, rendering any dependent nodes that aren't already rendered, feed the given input values, and fetch the corresponding result - values for a.
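A feeding sketch (placeholder and add are assumed from tensorflow-ops; expr from the core package):

    import Control.Monad.IO.Class (liftIO)
    import qualified Data.Vector as V
    import qualified TensorFlow.Core as TF
    import qualified TensorFlow.Ops as TF

    -- Feed a placeholder, then fetch x + x.
    main :: IO ()
    main = TF.runSession $ do
        x <- TF.placeholder (TF.Shape [3])
        let feedX = TF.encodeTensorData (TF.Shape [3])
                                        (V.fromList [1, 2, 3 :: Float])
        result <- TF.runWithFeeds [TF.feed x feedX]
                                  (TF.expr x `TF.add` TF.expr x)
        liftIO (print (result :: V.Vector Float))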

run_ :: Nodes t => t -> Session () Source

Run a subgraph t, rendering and extending any dependent nodes that aren't + values for a.

run_ :: Nodes t => t -> Session ()

Run a subgraph t, rendering and extending any dependent nodes that aren't already rendered. This behaves like run except that it doesn't do any - fetches.

runWithFeeds_ :: Nodes t => [Feed] -> t -> Session () Source

Run a subgraph t, rendering any dependent nodes that aren't already rendered, feed the given input values, and fetch the corresponding result values for a. This behaves like runWithFeeds except that it doesn't do any fetches.

runWithFeeds_ :: Nodes t => [Feed] -> t -> Session ()

Run a subgraph t, rendering any dependent nodes that aren't already rendered, feed the given input values, and fetch the corresponding result values for a. This behaves like runWithFeeds except that it doesn't do any fetches.

asyncProdNodes Source

Arguments

:: Nodes t 
=> t

Node to evaluate concurrently.

-> Session () 

Starts a concurrent thread which evaluates the given Nodes forever until runSession exits or an exception occurs. Graph extension happens synchronously, but the resultant run proceeds as a separate thread.

asyncProdNodes

Arguments

:: Nodes t 
=> t

Node to evaluate concurrently.

-> Session () 

Starts a concurrent thread which evaluates the given Nodes forever until runSession exits or an exception occurs. Graph extension happens synchronously, but the resultant run proceeds as a separate thread.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html index aea1faf..5199bf9 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Tensor.html @@ -1,17 +1,25 @@ TensorFlow.Tensor

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Tensor

Documentation

data Tensor v a Source

A named output of a TensorFlow operation.

The type parameter a is the type of the elements in the Tensor. The parameter v is either Value or Ref, depending on whether the graph is treating this op output as an immutable Value or a stateful Ref (e.g., a variable). Note that a Tensor Ref can be cast into a Tensor Value via value.

Constructors

Tensor (TensorKind v) Output 

Instances

data TensorKind v where Source

This class provides a runtime switch on whether a Tensor should be treated as a Value or as a Ref.

tensorAttr :: Attribute attr => Text -> Traversal' (Tensor v a) attr Source

Lens for the attributes of a tensor.

Only valid if the tensor has not yet been rendered. If the tensor has been rendered, the traversal will be over nothing (nothing can be read or written).

value :: Tensor v a -> Tensor Value a Source

Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a Ref into Value. This behaves like a no-op.

data Feed Source

A pair of a Tensor and some data that should be fed into that Tensor when running the graph.

Constructors

Feed Output TensorData 

feed :: Tensor v a -> TensorData a -> Feed Source

Create a Feed for feeding the given data into a Tensor when running the graph.

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Tensor

Synopsis

Documentation

data Tensor v a where

A named output of a TensorFlow operation.

The type parameter a is the type of the elements in the Tensor. The parameter v is either:

  • Build: An unrendered, immutable value.
  • Value: A rendered, immutable value.
  • Ref: A rendered stateful handle (e.g., a variable).

Note that expr, value, render and renderValue can help convert between the different types of Tensor.
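A small sketch of those conversions:

import TensorFlow.Build (Build, MonadBuild)
import TensorFlow.Tensor

-- A rendered, immutable tensor can always be used as an expression...
asExpr :: Tensor Value a -> Tensor Build a
asExpr = expr

-- ...a stateful Ref can be read as an immutable Value...
asValue :: Tensor Ref a -> Tensor Value a
asValue = value

-- ...and rendering fixes an expression into a Value.
fixed :: MonadBuild m => Tensor Build a -> m (Tensor Value a)
fixed = render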

Constructors

Tensor :: TensorKind v => v Output -> Tensor v a 

Fields

tensorOutput :: v Output
 

Instances

BuildInputs (ListOf (Tensor v) as) 
BuildInputs (Tensor v a) 
TensorTypes as => PureResult (TensorList Build as) 
PureResult (Tensor Build a) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
Rendered v => BuildResult (Tensor v a) 
Nodes (Tensor v a) 
(TensorType a, TensorDataType s a, (~) * a a') => Fetchable (Tensor v a) (s a') 
(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

newtype Value a

Constructors

Value 

Fields

runValue :: a
 

newtype Ref a

Constructors

Ref 

Fields

runRef :: a
 

value :: Tensor Ref a -> Tensor Value a

Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.

data Feed

A pair of a Tensor and some data that should be fed into that Tensor when running the graph.

Constructors

Feed Output TensorData 

class TensorKind v => Rendered v where

A class ensuring that a given tensor is rendered, i.e., has a fixed name, device, etc.

Methods

rendered :: v a -> a

feed :: Rendered v => Tensor v a -> TensorData a -> Feed

Create a Feed for feeding the given data into a Tensor when running the graph.

Note that if a Tensor is rendered, its identity may change; so feeding the rendered Tensor may be different from feeding the original Tensor.

tensorFromName :: TensorKind v -> Text -> Tensor v a Source

Create a Tensor for a given name. This can be used to reference nodes in a GraphDef that was loaded via addGraphDef. TODO(judahjacobson): add more safety checks here.

\ No newline at end of file

tensorFromName :: TensorKind v => Text -> Tensor v a

Create a Tensor for a given name. This can be used to reference nodes in a GraphDef that was loaded via addGraphDef. TODO(judahjacobson): add more safety checks here.

tensorValueFromName :: Text -> Tensor Value a

Like tensorFromName, but type-restricted to Value.
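For instance, loading a GraphDef and fetching one of its outputs by name (a sketch; the node name "my_output" is hypothetical, and GraphDef is taken from the generated proto module):

{-# LANGUAGE OverloadedStrings #-}

import qualified Data.Vector as V
import Proto.Tensorflow.Core.Framework.Graph (GraphDef)
import qualified TensorFlow.Core as TF
import TensorFlow.Tensor (tensorValueFromName)

-- Import a pre-built graph, then fetch the node named "my_output" from it.
runImported :: GraphDef -> IO (V.Vector Float)
runImported gd = TF.runSession $ do
    TF.build (TF.addGraphDef gd)
    TF.run (tensorValueFromName "my_output" :: TF.Tensor TF.Value Float)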

tensorRefFromName :: Text -> Tensor Ref a

Like tensorFromName, but type-restricted to Ref.

type TensorList v = ListOf (Tensor v)

colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a

Places all nodes rendered in the given Build action on the same device as the given Tensor (see also withDevice). Make sure that the action has side effects of rendering the desired tensors. A pure return would not have the desired effect.
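A sketch of typical usage: render one tensor on whatever device another, already-rendered tensor lives on.

import TensorFlow.Build (Build, MonadBuild)
import TensorFlow.Tensor

-- Rendering b is the side effect that places it on a's device.
onSameDevice :: MonadBuild m => Tensor Value x -> Tensor Build y -> m (Tensor Value y)
onSameDevice a b = colocateWith a (render b)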

render :: MonadBuild m => Tensor Build a -> m (Tensor Value a)

Render a Tensor, fixing its name, scope, device and control inputs from the MonadBuild context. Also renders any dependencies of the Tensor that weren't already rendered.

This operation is idempotent; calling render on the same input in the same context will produce the same result. However, rendering the same Tensor Build in two different contexts may result in two different Tensor Values.

expr :: TensorKind v => Tensor v a -> Tensor Build a

addSummary

Arguments

:: (MonadBuild m, TensorKind v) 
=> Tensor v ByteString

A SummaryTensor

-> m () 

Records the given summary action in Build for retrieval with collectAllSummaries. The summary op is required to produce a Summary protocol buffer in string form. For safety, use the pre-composed functions: Logging.scalarSummary and Logging.histogramSummary.

collectAllSummaries :: MonadBuild m => m [SummaryTensor]

Retrieves the summary ops collected thus far. Typically this only happens once, but if buildWithSummary is used repeatedly, the values accumulate.

type SummaryTensor = Tensor Value ByteString

Synonym for the tensors that return serialized Summary proto.

class Monad v => TensorKind v where

An internal class for kinds of Tensors.

Methods

toBuild :: v a -> Build a

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html index 28057b4..779507e 100644 --- a/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html +++ b/docs/haddock/tensorflow-0.1.0.0/TensorFlow-Types.html @@ -1,13 +1,12 @@ TensorFlow.Types

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Types

Synopsis

Documentation

class TensorType a where Source

The class of scalar types supported by tensorflow.

Methods

tensorType :: a -> DataType Source

tensorRefType :: a -> DataType Source

tensorVal :: Lens' TensorProto [a] Source

decodeTensorData :: TensorData a -> Vector a Source

Decode the bytes of a TensorData into a Vector.

encodeTensorData :: Shape -> Vector a -> TensorData a Source

Encode a Vector into a TensorData.

The values should be in row-major order, e.g.,

element 0: index (0, ..., 0)
element 1: index (0, ..., 1)
...

tensorflow-0.1.0.0: TensorFlow bindings.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.Types

Synopsis

Documentation

newtype TensorData a

Tensor data with the correct memory layout for tensorflow.

Constructors

TensorData 

Instances

(TensorType a, (~) * a a') => Fetchable (Tensor v a) (TensorData a') 

class TensorType a => TensorDataType s a where

Types that can be converted to and from TensorData.

Vector is the most efficient to encode/decode for most element types.

Methods

decodeTensorData :: TensorData a -> s a

Decode the bytes of a TensorData into an s.

encodeTensorData :: Shape -> s a -> TensorData a

Encode an s into a TensorData.

The values should be in row-major order, e.g.,

element 0: index (0, ..., 0)
element 1: index (0, ..., 1)
...
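For example, a 2x3 matrix is encoded as six values, rows first, and decoding recovers the same flat vector. A minimal sketch using the Vector instance:

import qualified Data.Vector as V
import TensorFlow.Types (Shape (..), TensorData, decodeTensorData, encodeTensorData)

-- Row-major encoding of the matrix [[1,2,3],[4,5,6]].
matrix :: TensorData Float
matrix = encodeTensorData (Shape [2, 3]) (V.fromList [1, 2, 3, 4, 5, 6])

roundTrip :: V.Vector Float
roundTrip = decodeTensorData matrix  -- [1,2,3,4,5,6]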

newtype TensorData a Source

Data about a tensor that is encoded for the TensorFlow APIs.

Constructors

TensorData 

newtype Shape Source

Shape (dimensions) of a tensor.

Constructors

Shape [Int64] 

Type constraints

type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a) Source

A Constraint specifying the possible choices of a TensorType.

We implement a Constraint like OneOf '[Double, Float] a by turning the natural representation as a disjunction, i.e., a == Double || a == Float, into a conjunction like a /= Int32 && a /= Int64 && a /= ByteString && ..., using an enumeration of all the possible TensorTypes.

newtype Scalar a

Constructors

Scalar 

Fields

unScalar :: a
 

Instances

TensorDataType Vector a => TensorDataType Scalar a 
Eq a => Eq (Scalar a) 
Floating a => Floating (Scalar a) 
Fractional a => Fractional (Scalar a) 
Num a => Num (Scalar a) 
Ord a => Ord (Scalar a) 
Real a => Real (Scalar a) 
RealFloat a => RealFloat (Scalar a) 
RealFrac a => RealFrac (Scalar a) 
Show a => Show (Scalar a) 
IsString a => IsString (Scalar a) 
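The wrapper is mainly a convenience for fetching rank-0 results; the lifted numeric instances let it be used like the value it wraps:

import TensorFlow.Types (Scalar (..))

five :: Scalar Int
five = Scalar 2 + Scalar 3  -- Num lifts pointwise

plain :: Int
plain = unScalar five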

newtype Shape

Shape (dimensions) of a tensor.

Constructors

Shape [Int64] 

Lists

data ListOf f as where

A heterogeneous list type.

Constructors

Nil :: ListOf f `[]` 
(:/) :: f a -> ListOf f as -> ListOf f (a : as) infixr 5 

Instances

All Eq (Map f as) => Eq (ListOf f as) 
All Show (Map f as) => Show (ListOf f as) 
BuildInputs (ListOf (Tensor v) as) 
TensorTypes as => PureResult (TensorList Build as) 
(Rendered v, TensorTypes as) => BuildResult (TensorList v as) 
(Nodes (f a), Nodes (ListOf f as)) => Nodes (ListOf f ((:) * a as)) 
Nodes (ListOf f ([] *)) 
(~) * l (List ([] *)) => Fetchable (ListOf f ([] *)) l 
(Fetchable (f t) a, Fetchable (ListOf f ts) (List as), (~) (* -> *) i Identity) => Fetchable (ListOf f ((:) * t ts)) (ListOf i ((:) * a as)) 

(/:/) :: a -> List as -> List (a : as) infixr 5

Equivalent of :/ for lists.
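A sketch of building a heterogeneous list of plain (Identity-wrapped) values:

{-# LANGUAGE DataKinds #-}

import TensorFlow.Types (List, ListOf (..), (/:/))

-- An Int followed by a Bool; (/:/) wraps each element for us.
pair :: List '[Int, Bool]
pair = 1 /:/ True /:/ Nil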

data TensorTypeProxy a where

class TensorTypes ts where

Instances

TensorTypes ([] *) 
(TensorType t, TensorTypes ts) => TensorTypes ((:) * t ts)

A constraint that the input is a list of TensorTypes.

fromTensorTypes :: forall as. TensorTypes as => Proxy as -> [DataType]

Type constraints

type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)

A Constraint specifying the possible choices of a TensorType.

We implement a Constraint like OneOf '[Double, Float] a by turning the natural representation as a disjunction, i.e.,

   a == Double || a == Float
 

into a conjunction like

    a /= Int32 && a /= Int64 && a /= ByteString && ...

using an enumeration of all the possible TensorTypes.

type family a /= b :: Constraint Source

A constraint checking that two types are different.

Equations

a /= a = TypeError a ~ ExcludedCase 
a /= b = () 

Implementation of constraints

data TypeError a Source

Helper types to produce a reasonable type error message when the Constraint "a /= a" fails. TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.

using an enumeration of all the possible TensorTypes.
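In signatures, OneOf restricts the element types an op accepts. A hypothetical helper (body left trivial for the sketch):

{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}

import TensorFlow.Build (Build)
import TensorFlow.Tensor (Tensor)
import TensorFlow.Types (OneOf)

-- Accepts only Float or Double tensors; Tensor Build Int32, say, is
-- rejected at compile time by the expanded (/=) inequalities.
floatingOnly :: OneOf '[Float, Double] t => Tensor Build t -> Tensor Build t
floatingOnly = id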

type family a /= b :: Constraint

A constraint checking that two types are different.

Equations

a /= a = TypeError a ~ ExcludedCase 
a /= b = () 

type OneOfs ts as = (TensorTypes as, TensorTypes ts, NoneOfs (AllTensorTypes \\ ts) as)

Implementation of constraints

data TypeError a

Helper types to produce a reasonable type error message when the Constraint "a /= a" fails. TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.

type family TensorTypes ts :: Constraint Source

A Constraint checking that the input is a list of TensorTypes. Helps improve error messages when using OneOf.

Equations

TensorTypes `[]` = () 
TensorTypes (t : ts) = (TensorType t, TensorTypes ts) 

type family NoneOf ts a :: Constraint Source

A constraint that the type a doesn't appear in the type list ts. Assumes that a and each of the elements of ts are TensorTypes.

Equations

NoneOf `[]` a = () 
NoneOf (t : ts) a = (a /= t, NoneOf ts a) 

type family as \\ bs Source

Takes the difference of two lists of types.

Equations

as \\ `[]` = as 
as \\ (b : bs) = Delete b as \\ bs 

type family Delete a as Source

Removes a type from the given list of types.

Equations

Delete a `[]` = `[]` 
Delete a (a : as) = Delete a as 
Delete a (b : as) = b : Delete a as 

type AllTensorTypes = `[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool]` Source

An enumeration of all valid TensorTypes.

\ No newline at end of file

type family NoneOf ts a :: Constraint

A constraint that the type a doesn't appear in the type list ts. Assumes that a and each of the elements of ts are TensorTypes.

Equations

NoneOf (t1 : (t2 : (t3 : (t4 : ts)))) a = (a /= t1, a /= t2, a /= t3, a /= t4, NoneOf ts a) 
NoneOf (t1 : (t2 : (t3 : ts))) a = (a /= t1, a /= t2, a /= t3, NoneOf ts a) 
NoneOf (t1 : (t2 : ts)) a = (a /= t1, a /= t2, NoneOf ts a) 
NoneOf (t1 : ts) a = (a /= t1, NoneOf ts a) 
NoneOf `[]` a = () 

type family as \\ bs

Takes the difference of two lists of types.

Equations

as \\ `[]` = as 
as \\ (b : bs) = Delete b as \\ bs 

type family Delete a as

Removes a type from the given list of types.

Equations

Delete a `[]` = `[]` 
Delete a (a : as) = Delete a as 
Delete a (b : as) = b : Delete a as 

type AllTensorTypes = `[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool]`

An enumeration of all valid TensorTypes.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-124.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-124.html new file mode 100644 index 0000000..41b9540 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-124.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - |)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-47.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-47.html new file mode 100644 index 0000000..c1d8049 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-47.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - /)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-58.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-58.html new file mode 100644 index 0000000..3691430 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-58.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - :)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-92.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-92.html new file mode 100644 index 0000000..65a2fec --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-92.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - \)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-95.html new file mode 100644 index 0000000..ad632ff --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-95.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - _)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-A.html new file mode 100644 index 0000000..2ea6cb4 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-A.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - A)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-All.html new file mode 100644 index 0000000..c57d00f --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-All.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index)

tensorflow-0.1.0.0: TensorFlow bindings.

Index

/:/TensorFlow.Types
/=TensorFlow.Types, TensorFlow.Core
:/TensorFlow.Types
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addNewOpTensorFlow.Build
addSummaryTensorFlow.Tensor
AllTensorTypesTensorFlow.Types
asGraphDefTensorFlow.Build, TensorFlow.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
AttributeTensorFlow.Types
attrLensTensorFlow.Types
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
BuildInputsTensorFlow.BuildOp
buildInputsTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildResultTensorFlow.BuildOp
buildResultTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
collectAllSummariesTensorFlow.Tensor
colocateWithTensorFlow.Tensor, TensorFlow.Core
ControlNode 
1 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
2 (Data Constructor)TensorFlow.Output, TensorFlow.Build
DataTypeTensorFlow.Types
decodeTensorDataTensorFlow.Types, TensorFlow.Core
DeleteTensorFlow.Types
Device 
1 (Type/Class)TensorFlow.Output, TensorFlow.Core
2 (Data Constructor)TensorFlow.Output, TensorFlow.Core
deviceNameTensorFlow.Output, TensorFlow.Core
DT_BFLOAT16TensorFlow.Types
DT_BFLOAT16_REFTensorFlow.Types
DT_BOOLTensorFlow.Types
DT_BOOL_REFTensorFlow.Types
DT_COMPLEX128TensorFlow.Types
DT_COMPLEX128_REFTensorFlow.Types
DT_COMPLEX64TensorFlow.Types
DT_COMPLEX64_REFTensorFlow.Types
DT_DOUBLETensorFlow.Types
DT_DOUBLE_REFTensorFlow.Types
DT_FLOATTensorFlow.Types
DT_FLOAT_REFTensorFlow.Types
DT_HALFTensorFlow.Types
DT_HALF_REFTensorFlow.Types
DT_INT16TensorFlow.Types
DT_INT16_REFTensorFlow.Types
DT_INT32TensorFlow.Types
DT_INT32_REFTensorFlow.Types
DT_INT64TensorFlow.Types
DT_INT64_REFTensorFlow.Types
DT_INT8TensorFlow.Types
DT_INT8_REFTensorFlow.Types
DT_INVALIDTensorFlow.Types
DT_QINT16TensorFlow.Types
DT_QINT16_REFTensorFlow.Types
DT_QINT32TensorFlow.Types
DT_QINT32_REFTensorFlow.Types
DT_QINT8TensorFlow.Types
DT_QINT8_REFTensorFlow.Types
DT_QUINT16TensorFlow.Types
DT_QUINT16_REFTensorFlow.Types
DT_QUINT8TensorFlow.Types
DT_QUINT8_REFTensorFlow.Types
DT_RESOURCETensorFlow.Types
DT_RESOURCE_REFTensorFlow.Types
DT_STRINGTensorFlow.Types
DT_STRING_REFTensorFlow.Types
DT_UINT16TensorFlow.Types
DT_UINT16_REFTensorFlow.Types
DT_UINT8TensorFlow.Types
DT_UINT8_REFTensorFlow.Types
encodeOutputTensorFlow.Build
encodeTensorDataTensorFlow.Types, TensorFlow.Core
eqLengthGuardTensorFlow.BuildOp
evalBuildTTensorFlow.Build
ExcludedCaseTensorFlow.Types
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
exprTensorFlow.Tensor, TensorFlow.Core
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
Feed 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Type/Class)TensorFlow.Nodes
2 (Data Constructor)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getVarIntTensorFlow.Internal.VarInt
GraphStateTensorFlow.Build
groupTensorFlow.ControlFlow, TensorFlow.Core
hoistBuildTTensorFlow.Build
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
ListTensorFlow.Types
ListOfTensorFlow.Types
lookupNodeTensorFlow.Build
MonadBuildTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
NilTensorFlow.Types
NodeName 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
noOpTensorFlow.ControlFlow, TensorFlow.Core
OneOfTensorFlow.Types, TensorFlow.Core
OneOfsTensorFlow.Types
opAttrTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
opDefTensorFlow.Build
opDefWithNameTensorFlow.Build
opInputsTensorFlow.Output, TensorFlow.Build
opNameTensorFlow.Output, TensorFlow.Build, TensorFlow.Core
OpParamsTensorFlow.BuildOp
OptionsTensorFlow.Session, TensorFlow.Core
OpType 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
Output 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
outputTensorFlow.Output
outputIndexTensorFlow.Output
OutputIx 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
outputNodeNameTensorFlow.Output
PendingNodeNameTensorFlow.Output
protoShapeTensorFlow.Types
pureOpTensorFlow.BuildOp
PureResultTensorFlow.BuildOp
pureResultTensorFlow.BuildOp
putVarIntTensorFlow.Internal.VarInt
Ref 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
renderTensorFlow.Tensor, TensorFlow.Core
RenderedTensorFlow.Tensor
renderedTensorFlow.Tensor
renderedNodeDefsTensorFlow.Build
renderedOutputTensorFlow.Tensor
renderValueTensorFlow.Tensor
ResourceHandle 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
run 
1 (Function)TensorFlow.Internal.FFI
2 (Function)TensorFlow.Session, TensorFlow.Core
runBuildTTensorFlow.Build
runRefTensorFlow.Tensor
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runValueTensorFlow.Tensor
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
Scalar 
1 (Type/Class)TensorFlow.Types, TensorFlow.Core
2 (Data Constructor)TensorFlow.Types, TensorFlow.Core
Session 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Session, TensorFlow.Core
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
Shape 
1 (Type/Class)TensorFlow.Types, TensorFlow.Core
2 (Data Constructor)TensorFlow.Types, TensorFlow.Core
summariesTensorFlow.Build
SummaryTensorTensorFlow.Tensor
Tensor 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
TensorData 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
3 (Type/Class)TensorFlow.Types, TensorFlow.Core
4 (Data Constructor)TensorFlow.Types
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Type/Class)TensorFlow.Types
2 (Data Constructor)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
toBuildTensorFlow.Tensor
TypeErrorTensorFlow.Types
unControlNodeTensorFlow.Output, TensorFlow.Build
UniqueTensorFlow.Build
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
unScalarTensorFlow.Types, TensorFlow.Core
unTensorDataTensorFlow.Types
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
Value 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
valueTensorFlow.Tensor, TensorFlow.Core
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
\\TensorFlow.Types
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_opInputsTensorFlow.Output
_opNameTensorFlow.Output
_opTypeTensorFlow.Output
\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-B.html new file mode 100644 index 0000000..1144633 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-B.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - B)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-C.html new file mode 100644 index 0000000..1104328 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-C.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - C)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-D.html new file mode 100644 index 0000000..635d049 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-D.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - D)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-E.html new file mode 100644 index 0000000..8f93c0a --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-E.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - E)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-F.html new file mode 100644 index 0000000..b68ccf2 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-F.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - F)

tensorflow-0.1.0.0: TensorFlow bindings.

Index - F

Feed 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Type/Class)TensorFlow.Nodes
2 (Data Constructor)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
fromTensorTypeListTensorFlow.Types
fromTensorTypesTensorFlow.Types
\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-G.html new file mode 100644 index 0000000..7754769 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-G.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - G)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-H.html new file mode 100644 index 0000000..5cb5a53 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-H.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - H)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-I.html new file mode 100644 index 0000000..10d6dca --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-I.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - I)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-L.html new file mode 100644 index 0000000..6432c93 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-L.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - L)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-M.html new file mode 100644 index 0000000..062d654 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-M.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - M)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-N.html new file mode 100644 index 0000000..ab37e7d --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-N.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - N)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-O.html new file mode 100644 index 0000000..809ce24 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-O.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - O)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-P.html new file mode 100644 index 0000000..3781991 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-P.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - P)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-R.html new file mode 100644 index 0000000..991398b --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-R.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - R)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-S.html new file mode 100644 index 0000000..544afa5 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-S.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - S)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-T.html new file mode 100644 index 0000000..a7f6b35 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-T.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - T)

tensorflow-0.1.0.0: TensorFlow bindings.

Index - T

Tensor 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
TensorData 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
3 (Type/Class)TensorFlow.Types, TensorFlow.Core
4 (Data Constructor)TensorFlow.Types
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
TensorDataTypeTensorFlow.Types, TensorFlow.Core
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor
TensorListTensorFlow.Tensor
tensorListOutputsTensorFlow.Tensor
tensorNodeNameTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
tensorRefFromNameTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypeListTensorFlow.Types
TensorTypeProxy 
1 (Type/Class)TensorFlow.Types
2 (Data Constructor)TensorFlow.Types
TensorTypesTensorFlow.Types
tensorTypesTensorFlow.Types
tensorValTensorFlow.Types
tensorValueFromNameTensorFlow.Tensor
toBuildTensorFlow.Tensor
TypeErrorTensorFlow.Types
\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-U.html new file mode 100644 index 0000000..b373d24 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-U.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - U)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-V.html new file mode 100644 index 0000000..7177415 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-V.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - V)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index-W.html b/docs/haddock/tensorflow-0.1.0.0/doc-index-W.html new file mode 100644 index 0000000..f4ff645 --- /dev/null +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index-W.html @@ -0,0 +1,4 @@ +tensorflow-0.1.0.0: TensorFlow bindings. (Index - W)

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-0.1.0.0/doc-index.html index 02c2b7e..b50be58 100644 --- a/docs/haddock/tensorflow-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ tensorflow-0.1.0.0: TensorFlow bindings. (Index)

tensorflow-0.1.0.0: TensorFlow bindings.

Index

/=TensorFlow.Types, TensorFlow.Core
addGraphDefTensorFlow.Build, TensorFlow.Session, TensorFlow.Core
addInitializerTensorFlow.Build
addNewOpTensorFlow.Build
addSummaryTensorFlow.Build
AllTensorTypesTensorFlow.Types
asGraphDefTensorFlow.Build, TensorFlow.Core
asyncProdNodesTensorFlow.Session, TensorFlow.Core
AttributeTensorFlow.Types
attrLensTensorFlow.Types
BuildTensorFlow.Build, TensorFlow.Core
buildTensorFlow.Session, TensorFlow.Core
buildAndTensorFlow.Session, TensorFlow.Core
buildListOpTensorFlow.BuildOp
BuildOpTensorFlow.BuildOp
buildOpTensorFlow.BuildOp
BuildTTensorFlow.Build, TensorFlow.Core
buildWithSummaryTensorFlow.Session, TensorFlow.Core
collectAllSummariesTensorFlow.Build
colocateWithTensorFlow.Build, TensorFlow.Core
ControlNode 
1 (Type/Class)TensorFlow.Output, TensorFlow.Build, TensorFlow.Core
2 (Data Constructor)TensorFlow.Output, TensorFlow.Build
decodeTensorDataTensorFlow.Types, TensorFlow.Core
DeleteTensorFlow.Types
Device 
1 (Type/Class)TensorFlow.Output, TensorFlow.Core
2 (Data Constructor)TensorFlow.Output, TensorFlow.Core
deviceNameTensorFlow.Output, TensorFlow.Core
encodeTensorDataTensorFlow.Types, TensorFlow.Core
eqLengthGuardTensorFlow.BuildOp
evalBuildTTensorFlow.Build
ExcludedCaseTensorFlow.Types
ExplicitNameTensorFlow.Output
explicitNameTensorFlow.Build
extendTensorFlow.Session
extendGraphTensorFlow.Internal.FFI
Feed 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
feedTensorFlow.Tensor, TensorFlow.Core
Fetch 
1 (Type/Class)TensorFlow.Nodes
2 (Data Constructor)TensorFlow.Nodes
FetchableTensorFlow.Nodes, TensorFlow.Core
fetchesTensorFlow.Nodes
fetchRestoreTensorFlow.Nodes
fetchTensorListTensorFlow.Nodes
fetchTensorVectorTensorFlow.Nodes
flushInitializersTensorFlow.Build
flushNodeBufferTensorFlow.Build
getAllOpListTensorFlow.Internal.FFI
getFetchTensorFlow.Nodes
getNodesTensorFlow.Nodes
getOrAddOpTensorFlow.Build
getVarIntTensorFlow.Internal.VarInt
GraphStateTensorFlow.Build
groupTensorFlow.ControlFlow, TensorFlow.Core
hoistBuildTTensorFlow.Build
identityTensorFlow.ControlFlow, TensorFlow.Core
ImplicitNameTensorFlow.Output
implicitNameTensorFlow.Build
namedTensorFlow.ControlFlow, TensorFlow.Core
NodeName 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
NodesTensorFlow.Nodes, TensorFlow.Core
nodesUnionTensorFlow.Nodes
NoneOfTensorFlow.Types
noOpTensorFlow.ControlFlow, TensorFlow.Core
OneOfTensorFlow.Types, TensorFlow.Core
OpTensorFlow.Output
opAttrTensorFlow.Output, TensorFlow.Build
opControlInputsTensorFlow.Output, TensorFlow.Build
OpDef 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
opDefTensorFlow.Build
opDefWithNameTensorFlow.Build
opInputsTensorFlow.Output, TensorFlow.Build
opNameTensorFlow.Output, TensorFlow.Build
OpResultTensorFlow.BuildOp
OptionsTensorFlow.Session, TensorFlow.Core
OpType 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
opTypeTensorFlow.Output, TensorFlow.Build
opUnrenderedTensorFlow.Output
Output 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
outputTensorFlow.Output
outputIndexTensorFlow.Output
OutputIx 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
outputOpTensorFlow.Output
PendingNodeNameTensorFlow.Output
protoShapeTensorFlow.Types
putVarIntTensorFlow.Internal.VarInt
RefTensorFlow.Tensor, TensorFlow.Core
RefKindTensorFlow.Tensor, TensorFlow.Core
renderTensorFlow.Build, TensorFlow.Core
RenderedTensorFlow.Output
renderedNodeDefsTensorFlow.Build
renderNodeNameTensorFlow.Build
renderOutputTensorFlow.Build
ResourceHandle 
1 (Type/Class)TensorFlow.Output
2 (Data Constructor)TensorFlow.Output
run 
1 (Function)TensorFlow.Internal.FFI
2 (Function)TensorFlow.Session, TensorFlow.Core
runBuildTTensorFlow.Build
runSessionTensorFlow.Session, TensorFlow.Core
runSessionWithOptionsTensorFlow.Session, TensorFlow.Core
runWithFeedsTensorFlow.Session, TensorFlow.Core
runWithFeeds_TensorFlow.Session, TensorFlow.Core
run_TensorFlow.Session, TensorFlow.Core
Scalar 
1 (Type/Class)TensorFlow.Nodes, TensorFlow.Core
2 (Data Constructor)TensorFlow.Nodes, TensorFlow.Core
Session 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Type/Class)TensorFlow.Session, TensorFlow.Core
sessionConfigTensorFlow.Session, TensorFlow.Core
sessionTargetTensorFlow.Session, TensorFlow.Core
sessionTracerTensorFlow.Session, TensorFlow.Core
setSessionConfigTensorFlow.Internal.FFI
setSessionTargetTensorFlow.Internal.FFI
Shape 
1 (Type/Class)TensorFlow.Types, TensorFlow.Core
2 (Data Constructor)TensorFlow.Types, TensorFlow.Core
SummaryTensorTensorFlow.Build
Tensor 
1 (Type/Class)TensorFlow.Tensor, TensorFlow.Core
2 (Data Constructor)TensorFlow.Tensor
tensorAttrTensorFlow.Tensor, TensorFlow.Core
TensorData 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
3 (Type/Class)TensorFlow.Types, TensorFlow.Core
4 (Data Constructor)TensorFlow.Types
tensorDataBytesTensorFlow.Internal.FFI
tensorDataDimensionsTensorFlow.Internal.FFI
tensorDataTypeTensorFlow.Internal.FFI
TensorFlowException 
1 (Type/Class)TensorFlow.Internal.FFI
2 (Data Constructor)TensorFlow.Internal.FFI
tensorFromNameTensorFlow.Tensor, TensorFlow.Core
TensorKindTensorFlow.Tensor, TensorFlow.Core
tensorKindTensorFlow.Tensor
tensorOutputTensorFlow.Tensor
tensorRefTypeTensorFlow.Types
TensorTypeTensorFlow.Types, TensorFlow.Core
tensorTypeTensorFlow.Types
TensorTypesTensorFlow.Types
tensorValTensorFlow.Types
TypeErrorTensorFlow.Types
unControlNodeTensorFlow.Output, TensorFlow.Build
UniqueTensorFlow.Build
unNodeNameTensorFlow.Output
unOpTypeTensorFlow.Output
unOutputIxTensorFlow.Output
UnrenderedTensorFlow.Output
unScalarTensorFlow.Nodes, TensorFlow.Core
unTensorDataTensorFlow.Types
useProtoAsVoidPtrLenTensorFlow.Internal.FFI
ValueTensorFlow.Tensor, TensorFlow.Core
valueTensorFlow.Tensor, TensorFlow.Core
ValueKindTensorFlow.Tensor, TensorFlow.Core
withControlDependenciesTensorFlow.ControlFlow, TensorFlow.Core
withDeviceTensorFlow.Build, TensorFlow.Core
withNameScopeTensorFlow.Build, TensorFlow.Core
withNodeDependenciesTensorFlow.Build
withSessionTensorFlow.Internal.FFI
withStateLensTensorFlow.Build
\\TensorFlow.Types
_opAttrsTensorFlow.Output
_opControlInputsTensorFlow.Output
_opInputsTensorFlow.Output
_opNameTensorFlow.Output
_opTypeTensorFlow.Output
\ No newline at end of file +

tensorflow-0.1.0.0: TensorFlow bindings.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html index a4b221c..cc90c09 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Build.html @@ -1,4 +1,4 @@ TensorFlow.Build

TensorFlow.Build

\ No newline at end of file +

TensorFlow.Build

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html index cd0e477..8f06a64 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-BuildOp.html @@ -1,4 +1,4 @@ TensorFlow.BuildOp

TensorFlow.BuildOp

\ No newline at end of file +

TensorFlow.BuildOp

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html index 3d5fc95..5b06fa8 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-ControlFlow.html @@ -1,4 +1,4 @@ TensorFlow.ControlFlow

TensorFlow.ControlFlow

\ No newline at end of file +

TensorFlow.ControlFlow

Dependencies

Operations

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html index 8144ad6..04eef8c 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Core.html @@ -1,4 +1,4 @@ TensorFlow.Core

TensorFlow.Core

\ No newline at end of file +

TensorFlow.Core

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html index 6765575..e5ea2c1 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Nodes.html @@ -1,4 +1,4 @@ TensorFlow.Nodes

TensorFlow.Nodes

\ No newline at end of file +

TensorFlow.Nodes

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html index cd6f047..27d11f4 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Output.html @@ -1,4 +1,4 @@ TensorFlow.Output

TensorFlow.Output

\ No newline at end of file +

TensorFlow.Output

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html index 1007e01..b148171 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Session.html @@ -1,4 +1,4 @@ TensorFlow.Session

TensorFlow.Session

\ No newline at end of file +

TensorFlow.Session

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html index cdab973..8442ee8 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Tensor.html @@ -1,4 +1,4 @@ TensorFlow.Tensor

TensorFlow.Tensor

\ No newline at end of file +

TensorFlow.Tensor

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html index 5ba106f..bb67294 100644 --- a/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html +++ b/docs/haddock/tensorflow-0.1.0.0/mini_TensorFlow-Types.html @@ -1,4 +1,4 @@ TensorFlow.Types

TensorFlow.Types

class TensorType a

data TensorData a

data Shape

class Attribute a

Type constraints

type OneOf ts a

type family a /= b :: Constraint

Implementation of constraints

data TypeError a

type family TensorTypes ts :: Constraint

type family NoneOf ts a :: Constraint

type family as \\ bs

type family Delete a as

\ No newline at end of file +

TensorFlow.Types

class TensorType a

data TensorData a

class TensorDataType s a

data Scalar a

data Shape

class Attribute a

Lists

data ListOf f as

type List

class TensorTypes ts

Type constraints

type OneOf ts a

type family a /= b :: Constraint

type OneOfs ts as

Implementation of constraints

data TypeError a

type family NoneOf ts a :: Constraint

type family as \\ bs

type family Delete a as

\ No newline at end of file diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Build.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Build.html deleted file mode 100644 index 57b54be..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Build.html +++ /dev/null @@ -1,387 +0,0 @@ - - - - - -src/TensorFlow/Build.hs - - - -
-- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE GeneralizedNewtypeDeriving #-}
-{-# LANGUAGE LambdaCase #-}
-{-# LANGUAGE Rank2Types #-}
-{-# LANGUAGE OverloadedStrings #-}
-module TensorFlow.Build
-    ( -- * Graph node types
-      ControlNode(..)
-    , Unique
-    -- * Ops
-    , explicitName
-    , implicitName
-    , opDef
-    , opDefWithName
-    , opName
-    , opType
-    , opAttr
-    , opInputs
-    , opControlInputs
-    -- * The Build monad
-    , GraphState
-    , render
-    , renderNodeName
-    , renderedNodeDefs
-    , BuildT
-    , Build
-    , addInitializer
-    , hoistBuildT
-    , evalBuildT
-    , runBuildT
-    , asGraphDef
-    , addGraphDef
-    , flushInitializers
-    , flushNodeBuffer
-    -- * Creating and looking up Ops
-    , getOrAddOp
-    , addNewOp
-    , renderOutput
-    -- * Modifying all nodes in a Build action
-    , colocateWith
-    , withStateLens
-    , withDevice
-    , withNameScope
-    , withNodeDependencies
-    -- * Internal Summary related bits.
-    , addSummary
-    , SummaryTensor
-    , collectAllSummaries
-    ) where
-
-import Control.Monad.IO.Class (MonadIO(..))
-import Control.Monad.Trans.Class (MonadTrans(..))
-import Control.Monad.Trans.State.Strict(StateT(..), mapStateT, evalStateT)
-import Data.ByteString (ByteString)
-import Data.Default (def)
-import Data.Functor.Identity (Identity(..))
-import qualified Data.Map.Strict as Map
-import Data.Monoid ((<>))
-import qualified Data.Set as Set
-import Data.Set (Set)
-import Data.String (IsString(..))
-import Data.Text (Text)
-import qualified Data.Text as Text
-import Lens.Family2 (Lens', (.~), (^.), (&))
-import Lens.Family2.State.Strict (MonadState, use, uses, (.=), (<>=), (%=))
-import Lens.Family2.Unchecked (lens)
-import Proto.Tensorflow.Core.Framework.Graph
-    ( GraphDef
-    , node
-    )
-import Proto.Tensorflow.Core.Framework.NodeDef
-    ( NodeDef
-    , attr
-    , input
-    , device
-    , name
-    , op
-    )
-
-import TensorFlow.Orphans ()
-import TensorFlow.Output
-import TensorFlow.Tensor
-
-newtype Unique = Unique Int
-    deriving (Eq, Ord, Enum)
-
---------------
-
-implicitName :: PendingNodeName
-implicitName = ImplicitName
-
-explicitName :: Text -> PendingNodeName
-explicitName = ExplicitName
-
-newtype Scope = Scope {unScope :: Text}
-    deriving (Eq, Ord, IsString)
-
-instance Show Scope where
-    show = show . unScope
-
-opDef :: OpType -> OpDef
-opDef = opDefWithName ImplicitName
-
-opDefWithName :: PendingNodeName -> OpType -> OpDef
-opDefWithName n t = OpDef
-    { _opName = n
-    , _opType = t
-    , _opAttrs = Map.empty
-    , _opInputs = []
-    , _opControlInputs = []
-    }
-
--- | Synonym for the tensors that return serialized Summary proto.
-type SummaryTensor = Tensor Value ByteString
-
-data GraphState = GraphState
-    { _renderedNodes :: !(Map.Map PendingNode NodeDef)
-        -- ^ Nodes which have been rendered.  Keeps track of the unique ID we
-        -- assign each implicitly-named node.  Also prevents us from adding the
-        -- same node (implicit or explicit) more than once to the nodeBuffer.
-    , _renderedNodeDefs :: !(Map.Map NodeName NodeDef)
-        -- ^ The NodeDefs of nodes which have been rendered. Used by the
-        -- Gradient module to inspect the node graph.
-    , _nodeBuffer :: [NodeDef]
-        -- ^ A list of nodes that should be passed to TensorFlow during
-        -- the next call to Session.extend (TF_ExtendGraph).
-    , _nextUnique :: !Unique
-        -- ^ Unique ID for the next node
-    -- TODO(judahjacobson): watch for clashes between auto and user names.
-    , _defaultDevice :: !(Maybe Device)
-    , _currentScope :: [Scope]
-    , _defaultControlInputs :: !(Set NodeName)
-    , _initializationNodes  :: [NodeName]
-      -- ^ The nodes to run next time a TF.run is issued, typically
-      -- variable initializers.
-    , _summaries :: [SummaryTensor]
-      -- ^ The tensors for summary
-    }
-
--- | A node definition without its final name.  Used as a key in the
--- "renderedNodes" map.
--- The NodeDef contained inside has an empty "name" field.
-data PendingNode = PendingNode [Scope] !PendingNodeName !NodeDef
-    deriving (Eq, Ord)
-
--- Returns an _incomplete_ NodeDef. The name is fixed by addNewOpFromPending.
-pendingNodeDef :: PendingNode -> NodeDef
-pendingNodeDef (PendingNode _ _ n) = n
-
-initGraphState :: GraphState
-initGraphState =
-    GraphState Map.empty Map.empty [] (Unique 0) Nothing [] Set.empty [] []
-
-renderedNodes :: Lens' GraphState (Map.Map PendingNode NodeDef)
-renderedNodes = lens _renderedNodes (\g x -> g { _renderedNodes = x })
-
-renderedNodeDefs :: Lens' GraphState (Map.Map NodeName NodeDef)
-renderedNodeDefs = lens _renderedNodeDefs (\g x -> g { _renderedNodeDefs = x })
-
-nodeBuffer :: Lens' GraphState [NodeDef]
-nodeBuffer = lens _nodeBuffer (\g x -> g { _nodeBuffer = x })
-
-nextUnique :: Lens' GraphState Unique
-nextUnique = lens _nextUnique (\g x -> g { _nextUnique = x })
-
-defaultDevice :: Lens' GraphState (Maybe Device)
-defaultDevice = lens _defaultDevice (\g x -> g { _defaultDevice = x })
-
-currentScope :: Lens' GraphState [Scope]
-currentScope = lens _currentScope (\g x -> g { _currentScope = x })
-
-defaultControlInputs :: Lens' GraphState (Set NodeName)
-defaultControlInputs = lens _defaultControlInputs
-                          (\g x -> g { _defaultControlInputs = x })
-
-initializationNodes :: Lens' GraphState [NodeName]
-initializationNodes = lens _initializationNodes (\g x -> g { _initializationNodes = x })
-
-summaries :: Lens' GraphState [SummaryTensor]
-summaries = lens _summaries (\g x -> g { _summaries = x })
-
--- | An action for building nodes in a TensorFlow graph.
--- Used to manage build state internally as part of the @Session@ monad.
-newtype BuildT m a = BuildT (StateT GraphState m a)
-    deriving (Functor, Applicative, Monad, MonadIO, MonadTrans,
-              MonadState GraphState)
-
--- | An action for building nodes in a TensorFlow graph.
-type Build = BuildT Identity
-
--- | This is Control.Monad.Morph.hoist sans the dependency.
-hoistBuildT :: (forall a . m a -> n a) -> BuildT m b -> BuildT n b
-hoistBuildT f (BuildT m) = BuildT $ mapStateT f m
-
-runBuildT :: BuildT m a -> m (a, GraphState)
-runBuildT (BuildT f) = runStateT f initGraphState
-
-evalBuildT :: Monad m => BuildT m a -> m a
-evalBuildT (BuildT f) = evalStateT f initGraphState
-
--- | Get all the NodeDefs that have accumulated so far, and clear that buffer.
-flushNodeBuffer :: Monad m => BuildT m [NodeDef]
-flushNodeBuffer = do
-    ns <- use nodeBuffer
-    nodeBuffer .= []
-    return ns
-
--- | Get all the initializers that have accumulated so far, and clear
--- that buffer.
-flushInitializers :: Monad m => BuildT m [NodeName]
-flushInitializers = do
-    ns <- use initializationNodes
-    initializationNodes .= []
-    return ns
-
--- | Registers the given node to be executed before the next
--- 'TensorFlow.Session.run'.
-addInitializer :: ControlNode -> Build ()
-addInitializer (ControlNode o) = do
-    i <- getOrAddOp o
-    initializationNodes %= (i:)
-
--- | Produce a GraphDef proto representation of the nodes that are rendered in
--- the given 'Build' action.
-asGraphDef :: Build a -> GraphDef
-asGraphDef b = def & node .~ gs ^. nodeBuffer
-  where
-    gs = snd $ runIdentity $ runBuildT b
-
--- TODO: check against existing nodes for conflicts?
-addGraphDef :: GraphDef -> Build ()
-addGraphDef g = nodeBuffer <>= g ^. node
-
--- | Render the given op if it hasn't been rendered already, and return its
--- name.
-getOrAddOp :: Op -> Build NodeName
-getOrAddOp o = NodeName . (^. name) <$> resolveOp o
-
-resolveOp :: Op -> Build NodeDef
-resolveOp (Rendered n) = return n
-resolveOp (Unrendered o) = do
-    pending <- getPendingNode o
-    uses renderedNodes (Map.lookup pending) >>= \case
-        Just n -> return n
-        Nothing -> addNewOpFromPending pending
-
--- | Add a new node for a given 'OpDef'.  This is used for making "stateful" ops
--- which are not safe to dedup (e.g, "variable" and "assign").
-addNewOp :: OpDef -> Build NodeDef
-addNewOp o = getPendingNode o >>= addNewOpFromPending
-
-addNewOpFromPending :: PendingNode -> Build NodeDef
-addNewOpFromPending pending = do
-    nodeName <- renderPendingNode pending
-    let nodeDef = pendingNodeDef pending & name .~ unNodeName nodeName
-    nodeBuffer %= (nodeDef :)
-    renderedNodes %= Map.insert pending nodeDef
-    renderedNodeDefs %= Map.insert nodeName nodeDef
-    return nodeDef
-
--- | Get the pending node corresponding to an OpDef, which may or may not have
--- been rendered before.  Implicitly renders all of this node's inputs.
-getPendingNode :: OpDef -> Build PendingNode
-getPendingNode o = do
-    -- An empty string in the proto field means that no specific
-    -- device is specified.
-    dev <- maybe "" deviceName <$> use defaultDevice
-    inputs <- mapM getInput (o ^. opInputs)
-    scope <- use currentScope
-    controls <- use defaultControlInputs
-    let controlInputs
-            = map getDep (o ^. opControlInputs ++ Set.toList controls)
-    return $ PendingNode scope (o ^. opName)
-            $ def & op .~ (unOpType (o ^. opType) :: Text)
-                  & attr .~ _opAttrs o
-                  & input .~ (inputs ++ controlInputs)
-                  & device .~ dev
-  where
-    getInput (Output (OutputIx k) subOp)
-        = (<> ":" <> Text.pack (show k)) . unNodeName <$> getOrAddOp subOp
-    getDep = ("^" <>) . unNodeName
-
--- | Pick a name for a pending node.  If it has an explicit name, just use that;
--- if the name is implicit, assign a new unique name based on the op type.
-renderPendingNode :: PendingNode -> Build NodeName
-renderPendingNode (PendingNode scope pendingName nodeDef)
-    = NodeName . (scopePrefix <>) <$> getName
-  where
-    scopePrefix = Text.concat $ fmap ((<> "/") . unScope) scope
-    getName = case pendingName of
-        ExplicitName n -> return n
-        ImplicitName -> do
-            u@(Unique k) <- use nextUnique
-            nextUnique .= succ u
-            return $ nodeDef ^. op <> "_" <> Text.pack (show k)
-
-
--- | Render an 'Output' and return a string representation for the TensorFlow
--- foreign APIs.
-renderOutput :: Output -> Build Text
-renderOutput (Output (OutputIx i) o) = do
-    n <- getOrAddOp o
-    return $ unNodeName n <> Text.pack (":" ++ show i)
-
--- | Modify some part of the state, run an action, and restore the state
--- after that action is done.
-withStateLens :: MonadState s m => Lens' s a -> (a -> a) -> m b -> m b
-withStateLens accessor f act = do
-    old <- use accessor
-    accessor %= f
-    result <- act
-    accessor .= old
-    return result
-
--- | Set a device for all nodes rendered in the given 'Build' action
--- (unless further overridden by another use of withDevice).
-withDevice :: Maybe Device -> Build a -> Build a
-withDevice d = withStateLens defaultDevice (const d)
-
--- | Places all nodes rendered in the given 'Build' action on the same
--- device as the given Tensor (see also 'withDevice'). Make sure that
--- the action has side effects of rendering the desired tensors. A pure
--- return would not have the desired effect.
-colocateWith :: forall a v b . Tensor v b -> Build a -> Build a
-colocateWith t x = do
-    d <- Device . (^. device) <$> resolveOp (t ^. tensorOutput . outputOp)
-    withDevice (Just d) x
-
--- | Prepend a scope to all nodes rendered in the given 'Build' action.
-withNameScope :: Text -> Build a -> Build a
-withNameScope s = withStateLens currentScope (Scope s :)
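-
--- A usage sketch (names assumed for illustration):
---
--- > withNameScope "layer1" $ render someTensor
---
--- renders the tensor's node with a "layer1/" prefix, e.g. "layer1/Add_0"
--- if the node was implicitly named.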
-
--- | Add control inputs to all nodes rendered in the given 'Build' action.
-withNodeDependencies :: Set NodeName -> Build a -> Build a
-withNodeDependencies nodes = withStateLens defaultControlInputs (<> nodes)
-
--- | Render a 'Tensor', fixing its name, scope, device and control inputs from
--- the 'Build' context.  Also renders any dependencies of the 'Tensor' that
--- weren't already rendered.
---
--- This operation is idempotent; @render >=> render === render@.  However,
--- rendering a (previously un-rendered) 'Tensor' in two different contexts
--- may result in two different 'Tensor's.
-render :: Tensor v a -> Build (Tensor v a)
-render = tensorOutput $ outputOp $ fmap Rendered . resolveOp
-
--- | Render a 'Tensor' and get its node's name.
-renderNodeName :: Tensor v a -> Build NodeName
-renderNodeName t = getOrAddOp (t ^. tensorOutput . outputOp)
-
--- | Records the given summary action in Build for retrieval with
--- 'collectAllSummaries'. The summary op is required to produce a
--- Summary protocol buffer in string form. For safety, use the
--- pre-composed functions: Logging.scalarSummary and
--- Logging.histogramSummary.
-addSummary :: SummaryTensor -> Build ()
-addSummary t = summaries %= (t :)
-
--- | Retrieves the summary ops collected thus far. Typically this only
--- happens once, but if 'TensorFlow.Session.buildWithSummary' is used
--- repeatedly, the values accumulate.
-collectAllSummaries :: Monad m => BuildT m [SummaryTensor]
-collectAllSummaries = use summaries
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-BuildOp.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-BuildOp.html deleted file mode 100644 index 93095ce..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-BuildOp.html +++ /dev/null @@ -1,222 +0,0 @@ - - - - - -src/TensorFlow/BuildOp.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE FlexibleInstances #-}
-{-# LANGUAGE TupleSections #-}
-
-module TensorFlow.BuildOp
-    ( OpResult
-    , BuildOp
-    , buildOp
-    , buildListOp
-    , eqLengthGuard
-    )
-  where
-
-import Control.Monad (replicateM)
-import Control.Monad.Reader (ReaderT, runReaderT, ask)
-import Control.Monad.State.Strict (State, runState, get, put)
-import Data.Int (Int64)
-import Lens.Family2 ((&), (<>~), (^.))
-
-import TensorFlow.Build
-import TensorFlow.Output
-import TensorFlow.Tensor
-
-data ResultState = ResultState !OutputIx [Int64] deriving Show
-
-type Result = ReaderT Op (State ResultState)
-
--- | Class of types that can be used as op outputs.
-class OpResult a where
-    toResult :: Result a
-
-instance (OpResult a1, OpResult a2) => OpResult (a1, a2) where
-    toResult = (,) <$> toResult <*> toResult
-
-instance (OpResult a1, OpResult a2, OpResult a3) => OpResult (a1, a2, a3) where
-    toResult = (,,) <$> toResult <*> toResult <*> toResult
-
-instance (OpResult a1, OpResult a2, OpResult a3, OpResult a4)
-         => OpResult (a1, a2, a3, a4) where
-    toResult = (,,,) <$> toResult <*> toResult <*> toResult <*> toResult
-
-instance (OpResult a1, OpResult a2, OpResult a3, OpResult a4, OpResult a5)
-         => OpResult (a1, a2, a3, a4, a5) where
-    toResult = (,,,,) <$> toResult
-                      <*> toResult
-                      <*> toResult
-                      <*> toResult
-                      <*> toResult
-
-instance ( OpResult a1
-         , OpResult a2
-         , OpResult a3
-         , OpResult a4
-         , OpResult a5
-         , OpResult a6
-         )
-         => OpResult (a1, a2, a3, a4, a5, a6) where
-    toResult = (,,,,,)
-               <$> toResult
-               <*> toResult
-               <*> toResult
-               <*> toResult
-               <*> toResult
-               <*> toResult
-
-tensorResult :: TensorKind v -> Result (Tensor v a)
-tensorResult v = Tensor v <$> recordResult
-
-recordResult :: Result Output
-recordResult = do
-    o <- ask
-    ResultState i ns <- get
-    put $! ResultState (i+1) ns
-    return $! output i o
-
-instance OpResult (ResourceHandle a) where
-    toResult = ResourceHandle <$> recordResult
-
-instance OpResult (Tensor Value a) where
-    toResult = tensorResult ValueKind
-
-instance OpResult (Tensor Ref a) where
-    toResult = tensorResult RefKind
-
-instance OpResult ControlNode where
-    toResult = ControlNode <$> ask
-
-instance OpResult a => OpResult [a] where
-    toResult = do
-        ResultState i ns <- get
-        case ns of
-            [] -> error $ "Ran out of counts in toResult. " ++
-                          "Likely misuse of buildListOp."
-            (n : rest) -> do
-                put $! ResultState i rest
-                replicateM (fromIntegral n) toResult
-
-runResult :: OpResult a => [Int64] -> Op -> a
-runResult ns o =
-    case runState (runReaderT toResult o) (ResultState 0 ns) of
-        (x, ResultState _ []) -> x
-        (_, ns') -> error $ "Unused lengths in runResult attributes: " ++
-                            show (ns, ns')
-
--- | Make a new "pure" op, which may be deduped with identical ops within
--- the same scope.
-pureResult :: OpResult a => [Int64] -> OpDef -> [Output] -> a
-pureResult ns o ts = runResult ns $ Unrendered $ addReversedInputs o ts
-
--- | Make a new "stateful" op, which will not be deduped with otherwise
--- identical ops.
-buildResult :: OpResult a => [Int64] -> OpDef -> [Output] -> Build a
-buildResult ns o ts
-    = runResult ns . Rendered <$> addNewOp (addReversedInputs o ts)
-
-addReversedInputs :: OpDef -> [Output] -> OpDef
-addReversedInputs o ts = o & opInputs <>~ reverse ts
-
--- | Class of types that can be used as op functions.
-class BuildOp f where
-    buildOp' :: [Int64]  -- ^ Sizes of list results (having number_attr)
-             -> OpDef
-             -> [Output] -- ^ Accumulator for inputs to the op.
-             -> f
-
--- | Starts an operation that returns a structured set of tensors
--- (singletons or tuples).
-buildOp :: BuildOp f => OpDef -> f
-buildOp o = buildOp' [] o []
-
--- | Starts an operation that returns a list of tensors.
-buildListOp :: BuildOp f => [Int64]
-               -- ^ Cardinality of each list of output tensors.
-               -> OpDef -> f
-buildListOp counts o = buildOp' counts o []
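-
--- An illustrative sketch (the "Add" op is an assumption, and its attributes
--- are elided here): a generated wrapper is defined by instantiating 'buildOp'
--- at a function type, and the @Tensor v a -> f@ instance below accumulates
--- each argument as an op input:
---
--- > add :: Tensor v Float -> Tensor v Float -> Tensor Value Float
--- > add = buildOp (opDef "Add")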
-
-instance BuildOp ControlNode where
-    buildOp' _ o ts = ControlNode $ Unrendered $ addReversedInputs o ts
-
-instance BuildOp (ResourceHandle a) where
-    buildOp' = pureResult
-
-instance BuildOp (Tensor Value a) where
-    buildOp' = pureResult
-
-instance BuildOp (Tensor Ref a) where
-    buildOp' = pureResult
-
-instance BuildOp [Tensor Value a] where
-    buildOp' = pureResult
-
-instance (OpResult t1, OpResult t2) => BuildOp (t1, t2) where
-    buildOp' = pureResult
-
-instance (OpResult t1, OpResult t2, OpResult t3) => BuildOp (t1, t2, t3) where
-    buildOp' = pureResult
-
-instance (OpResult t1, OpResult t2, OpResult t3, OpResult t4)
-         => BuildOp (t1, t2, t3, t4) where
-    buildOp' = pureResult
-
-instance (OpResult t1, OpResult t2, OpResult t3, OpResult t4, OpResult t5)
-         => BuildOp (t1, t2, t3, t4, t5) where
-    buildOp' = pureResult
-
-instance ( OpResult t1
-         , OpResult t2
-         , OpResult t3
-         , OpResult t4
-         , OpResult t5
-         , OpResult t6
-         )
-         => BuildOp (t1, t2, t3, t4, t5, t6) where
-    buildOp' = pureResult
-
-instance OpResult a => BuildOp (Build a) where
-    buildOp' = buildResult
-
-instance BuildOp f => BuildOp (ResourceHandle a -> f) where
-    buildOp' rf o ts (ResourceHandle t) = buildOp' rf o (t : ts)
-
-instance BuildOp f => BuildOp (Tensor v a -> f) where
-    buildOp' rf o ts t = buildOp' rf o (t ^. tensorOutput : ts)
-
-instance BuildOp f => BuildOp ([Tensor v a] -> f) where
-    buildOp' rf o accum ts
-        = buildOp' rf o (reverse (fmap (^. tensorOutput) ts) ++ accum)
-
--- | Returns true if all the integers in each tuple are identical.
--- Throws an error with a descriptive message if not.
-eqLengthGuard :: [(String, [(String, Int)])] -> Bool
-eqLengthGuard = all eachOk
-  where
-    eachOk (_, []) = True
-    -- The next line has (== 1) . length . nub in disguise
-    eachOk (numberAttrName, pairs@((_, x) : zs)) = all (\z -> snd z == x) zs ||
-        error ("number_attr " ++ numberAttrName ++
-               " contains tensors with different length " ++ show pairs)
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-ControlFlow.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-ControlFlow.html deleted file mode 100644 index 091a8cf..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-ControlFlow.html +++ /dev/null @@ -1,98 +0,0 @@ - - - - - -src/TensorFlow/ControlFlow.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE GADTs #-}
-{-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE RankNTypes #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-
-module TensorFlow.ControlFlow
-    ( -- * Dependencies
-      withControlDependencies
-    , group
-      -- * Operations
-    , identity
-    , noOp
-    , named
-    ) where
-
-import qualified Data.Set as Set
-import Data.Text (Text)
-import Lens.Family2 ((&), (^.), (.~))
-
-import TensorFlow.BuildOp
-import TensorFlow.Build
-import TensorFlow.Nodes
-import TensorFlow.Output
-import TensorFlow.Tensor
-import TensorFlow.Types
-
--- | Modify a 'Build' action, such that all new ops rendered in it will depend
--- on the nodes in the first argument.
-withControlDependencies :: Nodes t => t -> Build a -> Build a
-withControlDependencies deps act = do
-    nodes <- getNodes deps
-    withNodeDependencies nodes act
-
--- TODO(judahjacobson): Reimplement withDependencies.
-
--- | Create an op that groups multiple operations.
---
--- When this op finishes, all ops in the input @deps@ have finished.  This op has
--- no output.
-group :: Nodes t => t -> Build ControlNode
-group deps = do
-    nodes <- Set.toList <$> getNodes deps
-    -- TODO: slicker way
-    return $ buildOp $ opDef "NoOp" & opControlInputs .~ nodes
-
-
--- | Returns a 'Tensor' with the same shape and contents as the input.
-identity :: TensorType a => Tensor v a -> Tensor v a
-identity = namedIdentity implicitName
-
--- | Returns a 'Tensor' with a given name and the same shape and contents as
--- the input.
---
--- TODO(judahjacobson): This breaks when used with uninitialized @Tensor Ref@s,
--- since @RefIdentity@ doesn't have SetAllowsUninitializedInput().  Look into
--- whether we can change that op.
-named :: TensorType a => Text -> Tensor v a -> Tensor v a
-named = namedIdentity . explicitName
-
--- | An internal version of "identity" that allows setting the name
--- of the output Tensor.
-namedIdentity :: forall a v . TensorType a
-              => PendingNodeName -> Tensor v a -> Tensor v a
-namedIdentity n t = case t ^. tensorKind of
-                      ValueKind -> buildOp (opDefWithName n "Identity" & setTypeAttr) t
-                      RefKind -> buildOp (opDefWithName n "RefIdentity" & setTypeAttr) t
-  where
-    setTypeAttr = opAttr "T" .~ tensorType (undefined :: a)
-
-
--- | Does nothing.  Only useful as a placeholder for control edges.
-noOp :: ControlNode
-noOp = buildOp $ opDef "NoOp"
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Core.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Core.html deleted file mode 100644 index 9548c43..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Core.html +++ /dev/null @@ -1,104 +0,0 @@ - - - - - -src/TensorFlow/Core.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE ExplicitNamespaces #-}
-
--- | The core functionality of TensorFlow.
---
--- Unless you are defining ops, you do not need to import other modules from
--- this package.
---
--- Basic ops are provided in the tensorflow-ops and tensorflow-core-ops
--- packages.
-module TensorFlow.Core
-    ( -- * Session
-      Session
-    , Options
-    , sessionConfig
-    , sessionTarget
-    , sessionTracer
-    , runSession
-    , runSessionWithOptions
-      -- ** Building graphs
-    , build
-    , buildAnd
-    , buildWithSummary
-      -- ** Running graphs
-    , Fetchable
-    , Scalar(..)
-    , Nodes
-    , run
-    , run_
-    , Feed
-    , feed
-    , runWithFeeds
-    , runWithFeeds_
-      -- ** Async
-    , asyncProdNodes
-
-      -- * Build
-    , Build
-    , BuildT
-    , render
-    , asGraphDef
-    , addGraphDef
-
-      -- * Tensor
-    , ControlNode
-    , Tensor
-    , Value
-    , Ref
-    , TensorKind(..)
-    , tensorAttr
-    , value
-    , tensorFromName
-      -- ** Element types
-    , TensorData
-    , TensorType(decodeTensorData, encodeTensorData)
-    , Shape(..)
-    , OneOf
-    , type (/=)
-
-      -- * Op combinators
-    , colocateWith
-    , Device(..)
-    , withDevice
-    , withNameScope
-    , named
-      -- ** Dependencies
-    , withControlDependencies
-    , group
-      -- ** Misc
-    , identity
-    , noOp
-    ) where
-
-import TensorFlow.Build
-import TensorFlow.ControlFlow
-import TensorFlow.Nodes
-import TensorFlow.Output
-import TensorFlow.Session
-import TensorFlow.Tensor
-import TensorFlow.Types
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-FFI.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-FFI.html deleted file mode 100644 index 9ed5056..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-FFI.html +++ /dev/null @@ -1,268 +0,0 @@ - - - - - -src/TensorFlow/Internal/FFI.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE DeriveDataTypeable #-}
-{-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-
-module TensorFlow.Internal.FFI
-    ( TensorFlowException(..)
-    , Raw.Session
-    , withSession
-    , extendGraph
-    , run
-    , TensorData(..)
-    , setSessionConfig
-    , setSessionTarget
-    , getAllOpList
-      -- * Internal helper.
-    , useProtoAsVoidPtrLen
-    )
-    where
-
-import Control.Concurrent.Async (Async, async, cancel, waitCatch)
-import Control.Concurrent.MVar (MVar, modifyMVarMasked_, newMVar, takeMVar)
-import Control.Exception (Exception, throwIO, bracket, finally, mask_)
-import Control.Monad (when)
-import Data.Bits (Bits, toIntegralSized)
-import Data.Int (Int64)
-import Data.Maybe (fromMaybe)
-import Data.Typeable (Typeable)
-import Data.Word (Word8)
-import Foreign (Ptr, FunPtr, nullPtr, castPtr)
-import Foreign.C.String (CString)
-import Foreign.ForeignPtr (newForeignPtr, withForeignPtr)
-import Foreign.Marshal.Alloc (free)
-import Foreign.Marshal.Array (withArrayLen, peekArray, mallocArray, copyArray)
-import System.IO.Unsafe (unsafePerformIO)
-import qualified Data.ByteString as B
-import qualified Data.Text as T
-import qualified Data.Text.Encoding as T
-import qualified Data.Text.Encoding.Error as T
-import qualified Data.Vector.Storable as S
-import qualified Foreign.Concurrent as ForeignC
-
-import Data.ProtoLens (Message, encodeMessage)
-import Proto.Tensorflow.Core.Framework.Graph (GraphDef)
-import Proto.Tensorflow.Core.Framework.Types (DataType(..))
-import Proto.Tensorflow.Core.Protobuf.Config (ConfigProto)
-
-import qualified TensorFlow.Internal.Raw as Raw
-
-data TensorFlowException = TensorFlowException Raw.Code T.Text
-    deriving (Show, Eq, Typeable)
-
-instance Exception TensorFlowException
-
--- | All of the data needed to represent a tensor.
-data TensorData = TensorData
-    { tensorDataDimensions :: [Int64]
-    , tensorDataType       :: !DataType
-    , tensorDataBytes      :: !(S.Vector Word8)
-    }
-  deriving (Show, Eq)
-
--- | Runs the given action after creating a session with options
--- populated by the given optionSetter.
-withSession :: (Raw.SessionOptions -> IO ())
-            -> ((IO () -> IO ()) -> Raw.Session -> IO a)
-            -- ^ The action can spawn concurrent tasks which will
-            -- be canceled before withSession returns.
-            -> IO a
-withSession optionSetter action = do
-    drain <- newMVar []
-    let cleanup s =
-        -- Closes the session to nudge the pending run calls to fail and exit.
-            finally (checkStatus (Raw.closeSession s)) $ do
-                runners <- takeMVar drain
-                -- Collects all runners before deleting the session.
-                mapM_ shutDownRunner runners
-                checkStatus (Raw.deleteSession s)
-    bracket Raw.newSessionOptions Raw.deleteSessionOptions $ \options -> do
-        optionSetter options
-        bracket
-            (checkStatus (Raw.newSession options))
-            cleanup
-            (action (asyncCollector drain))
-
-asyncCollector :: MVar [Async ()] -> IO () -> IO ()
-asyncCollector drain runner = modifyMVarMasked_ drain launchAndRecord
-    where
-      launchAndRecord restRunners = (: restRunners) <$> async runner
-
-shutDownRunner :: Async () -> IO ()
-shutDownRunner r = do
-    cancel r
-    -- TODO(gnezdo): manage exceptions better than print.
-    either print (const (return ())) =<< waitCatch r
-
-extendGraph :: Raw.Session -> GraphDef -> IO ()
-extendGraph session pb =
-    useProtoAsVoidPtrLen pb $ \ptr len ->
-        checkStatus $ Raw.extendGraph session ptr len
-
-
-run :: Raw.Session
-    -> [(B.ByteString, TensorData)] -- ^ Feeds.
-    -> [B.ByteString]               -- ^ Fetches.
-    -> [B.ByteString]               -- ^ Targets.
-    -> IO [TensorData]
-run session feeds fetches targets = do
-    let nullTensor = Raw.Tensor nullPtr
-    -- Use mask to avoid leaking input tensors before they are passed to 'run'
-    -- and output tensors before they are passed to 'createTensorData'.
-    mask_ $
-        -- Feeds
-        withStringArrayLen (fst <$> feeds) $ \feedsLen feedNames ->
-        mapM (createRawTensor . snd) feeds >>= \feedTensors ->
-        withArrayLen feedTensors $ \_ cFeedTensors ->
-        -- Fetches.
-        withStringArrayLen fetches $ \fetchesLen fetchNames ->
-        -- tensorOuts is an array of null Tensor pointers that will be filled
-        -- by the call to Raw.run.
-        withArrayLen (replicate fetchesLen nullTensor) $ \_ tensorOuts ->
-        -- Targets.
-        withStringArrayLen targets $ \targetsLen ctargets -> do
-            checkStatus $ Raw.run
-                session
-                nullPtr
-                feedNames cFeedTensors (safeConvert feedsLen)
-                fetchNames tensorOuts (safeConvert fetchesLen)
-                ctargets (safeConvert targetsLen)
-                nullPtr
-            mapM_ Raw.deleteTensor feedTensors
-            outTensors <- peekArray fetchesLen tensorOuts
-            mapM createTensorData outTensors
-
-
--- Internal.
-
-
--- | Same as 'fromIntegral', but throws an error if conversion is "lossy".
-safeConvert ::
-    forall a b. (Show a, Show b, Bits a, Bits b, Integral a, Integral b)
-    => a -> b
-safeConvert x =
-    fromMaybe
-    (error ("Failed to convert " ++ show x ++ ", got " ++
-            show (fromIntegral x :: b)))
-    (toIntegralSized x)
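-
--- For example, @safeConvert (300 :: Int) :: Int64@ evaluates to 300, while
--- @safeConvert (300 :: Int) :: Word8@ calls 'error', since 300 truncated to
--- eight bits would silently become 44.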
-
-
--- | Use a list of ByteString as a list of CString.
-withStringList :: [B.ByteString] -> ([CString] -> IO a) -> IO a
-withStringList strings fn = go strings []
-  where
-    go [] cs = fn (reverse cs)
-    -- TODO(fmayle): Is it worth using unsafeAsCString here?
-    go (x:xs) cs = B.useAsCString x $ \c -> go xs (c:cs)
-
-
--- | Use a list of ByteString as an array of CString.
-withStringArrayLen :: [B.ByteString] -> (Int -> Ptr CString -> IO a) -> IO a
-withStringArrayLen xs fn = withStringList xs (`withArrayLen` fn)
-
-
--- | Create a Raw.Tensor from a TensorData.
-createRawTensor :: TensorData -> IO Raw.Tensor
-createRawTensor (TensorData dims dt byteVec) =
-    withArrayLen (map safeConvert dims) $ \cdimsLen cdims -> do
-        let len = S.length byteVec
-        dest <- mallocArray len
-        S.unsafeWith byteVec $ \x -> copyArray dest x len
-        Raw.newTensor (toEnum $ fromEnum dt)
-                      cdims (safeConvert cdimsLen)
-                      (castPtr dest) (safeConvert len)
-                      tensorDeallocFunPtr nullPtr
-
-{-# NOINLINE tensorDeallocFunPtr #-}
-tensorDeallocFunPtr :: FunPtr Raw.TensorDeallocFn
-tensorDeallocFunPtr = unsafePerformIO $ Raw.wrapTensorDealloc $ \x _ _ -> free x
-
--- | Create a TensorData from a Raw.Tensor.
---
--- Takes ownership of the Raw.Tensor.
-createTensorData :: Raw.Tensor -> IO TensorData
-createTensorData t = do
-    -- Read dimensions.
-    numDims <- Raw.numDims t
-    dims <- mapM (Raw.dim t) [0..numDims-1]
-    -- Read type.
-    dtype <- toEnum . fromEnum <$> Raw.tensorType t
-    -- Read data.
-    len <- safeConvert <$> Raw.tensorByteSize t
-    bytes <- castPtr <$> Raw.tensorData t :: IO (Ptr Word8)
-    fp <- ForeignC.newForeignPtr bytes (Raw.deleteTensor t)
-    let v = S.unsafeFromForeignPtr0 fp len
-    return $ TensorData (map safeConvert dims) dtype v
-
--- | Runs the given action which does FFI calls updating a provided
--- status object. If the status is not OK it is thrown as
--- TensorFlowException.
-checkStatus :: (Raw.Status -> IO a) -> IO a
-checkStatus fn =
-    bracket Raw.newStatus Raw.deleteStatus $ \status -> do
-        result <- fn status
-        code <- Raw.getCode status
-        when (code /= Raw.TF_OK) $ do
-            msg <- T.decodeUtf8With T.lenientDecode <$>
-                   (Raw.message status >>= B.packCString)
-            throwIO $ TensorFlowException code msg
-        return result
-
-setSessionConfig :: ConfigProto -> Raw.SessionOptions -> IO ()
-setSessionConfig pb opt =
-    useProtoAsVoidPtrLen pb $ \ptr len ->
-        checkStatus (Raw.setConfig opt ptr len)
-
-setSessionTarget :: B.ByteString -> Raw.SessionOptions -> IO ()
-setSessionTarget target = B.useAsCString target . Raw.setTarget
-
--- | Serializes the given msg and provides it as (ptr,len) argument
--- to the given action.
-useProtoAsVoidPtrLen :: (Message msg, Integral c, Show c, Bits c) =>
-                        msg -> (Ptr b -> c -> IO a) -> IO a
-useProtoAsVoidPtrLen msg f = B.useAsCStringLen (encodeMessage msg) $
-        \(bytes, len) -> f (castPtr bytes) (safeConvert len)
-
--- | Returns the serialized OpList of all OpDefs defined in this
--- address space.
-getAllOpList :: IO B.ByteString
-getAllOpList = do
-    foreignPtr <-
-        mask_ (newForeignPtr Raw.deleteBuffer =<< checkCall)
-    -- Makes a copy because it is more reliable than eviscerating
-    -- Buffer to steal its memory (including custom deallocator).
-    withForeignPtr foreignPtr $
-        \ptr -> B.packCStringLen =<< (,)
-                <$> (castPtr <$> Raw.getBufferData ptr)
-                <*> (safeConvert <$> Raw.getBufferLength ptr)
-    where
-      checkCall = do
-          p <- Raw.getAllOpList
-          when (p == nullPtr) (throwIO exception)
-          return p
-      exception = TensorFlowException
-                Raw.TF_UNKNOWN "GetAllOpList failure, check logs"
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-Raw.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-Raw.html deleted file mode 100644 index ed8c6ed..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-Raw.html +++ /dev/null @@ -1,522 +0,0 @@ - - - - - -.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/TensorFlow/Internal/Raw.hs - - - -
--- GENERATED by C->Haskell Compiler, version 0.28.1 Switcheroo, 1 April 2016 (Haskell)
--- Edit the ORIGINAL .chs file instead!
-
-
-{-# LINE 1 "src/TensorFlow/Internal/Raw.chs" #-}
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE ForeignFunctionInterface #-}
-
-module TensorFlow.Internal.Raw where
-import qualified Foreign.C.Types as C2HSImp
-import qualified Foreign.Ptr as C2HSImp
-import qualified Foreign.Storable as C2HSImp
-
-
-
-
-
-import Foreign
-import Foreign.C
-
-data DataType = TF_FLOAT
-              | TF_DOUBLE
-              | TF_INT32
-              | TF_UINT8
-              | TF_INT16
-              | TF_INT8
-              | TF_STRING
-              | TF_COMPLEX64
-              | TF_COMPLEX
-              | TF_INT64
-              | TF_BOOL
-              | TF_QINT8
-              | TF_QUINT8
-              | TF_QINT32
-              | TF_BFLOAT16
-              | TF_QINT16
-              | TF_QUINT16
-              | TF_UINT16
-              | TF_COMPLEX128
-              | TF_HALF
-              | TF_RESOURCE
-  deriving (Show,Eq)
-instance Enum DataType where
-  succ TF_FLOAT = TF_DOUBLE
-  succ TF_DOUBLE = TF_INT32
-  succ TF_INT32 = TF_UINT8
-  succ TF_UINT8 = TF_INT16
-  succ TF_INT16 = TF_INT8
-  succ TF_INT8 = TF_STRING
-  succ TF_STRING = TF_COMPLEX64
-  succ TF_COMPLEX64 = TF_INT64
-  succ TF_COMPLEX = TF_INT64
-  succ TF_INT64 = TF_BOOL
-  succ TF_BOOL = TF_QINT8
-  succ TF_QINT8 = TF_QUINT8
-  succ TF_QUINT8 = TF_QINT32
-  succ TF_QINT32 = TF_BFLOAT16
-  succ TF_BFLOAT16 = TF_QINT16
-  succ TF_QINT16 = TF_QUINT16
-  succ TF_QUINT16 = TF_UINT16
-  succ TF_UINT16 = TF_COMPLEX128
-  succ TF_COMPLEX128 = TF_HALF
-  succ TF_HALF = TF_RESOURCE
-  succ TF_RESOURCE = error "DataType.succ: TF_RESOURCE has no successor"
-
-  pred TF_DOUBLE = TF_FLOAT
-  pred TF_INT32 = TF_DOUBLE
-  pred TF_UINT8 = TF_INT32
-  pred TF_INT16 = TF_UINT8
-  pred TF_INT8 = TF_INT16
-  pred TF_STRING = TF_INT8
-  pred TF_COMPLEX64 = TF_STRING
-  pred TF_COMPLEX = TF_STRING
-  pred TF_INT64 = TF_COMPLEX64
-  pred TF_BOOL = TF_INT64
-  pred TF_QINT8 = TF_BOOL
-  pred TF_QUINT8 = TF_QINT8
-  pred TF_QINT32 = TF_QUINT8
-  pred TF_BFLOAT16 = TF_QINT32
-  pred TF_QINT16 = TF_BFLOAT16
-  pred TF_QUINT16 = TF_QINT16
-  pred TF_UINT16 = TF_QUINT16
-  pred TF_COMPLEX128 = TF_UINT16
-  pred TF_HALF = TF_COMPLEX128
-  pred TF_RESOURCE = TF_HALF
-  pred TF_FLOAT = error "DataType.pred: TF_FLOAT has no predecessor"
-
-  enumFromTo from to = go from
-    where
-      end = fromEnum to
-      go v = case compare (fromEnum v) end of
-                 LT -> v : go (succ v)
-                 EQ -> [v]
-                 GT -> []
-
-  enumFrom from = enumFromTo from TF_RESOURCE
-
-  fromEnum TF_FLOAT = 1
-  fromEnum TF_DOUBLE = 2
-  fromEnum TF_INT32 = 3
-  fromEnum TF_UINT8 = 4
-  fromEnum TF_INT16 = 5
-  fromEnum TF_INT8 = 6
-  fromEnum TF_STRING = 7
-  fromEnum TF_COMPLEX64 = 8
-  fromEnum TF_COMPLEX = 8
-  fromEnum TF_INT64 = 9
-  fromEnum TF_BOOL = 10
-  fromEnum TF_QINT8 = 11
-  fromEnum TF_QUINT8 = 12
-  fromEnum TF_QINT32 = 13
-  fromEnum TF_BFLOAT16 = 14
-  fromEnum TF_QINT16 = 15
-  fromEnum TF_QUINT16 = 16
-  fromEnum TF_UINT16 = 17
-  fromEnum TF_COMPLEX128 = 18
-  fromEnum TF_HALF = 19
-  fromEnum TF_RESOURCE = 20
-
-  toEnum 1 = TF_FLOAT
-  toEnum 2 = TF_DOUBLE
-  toEnum 3 = TF_INT32
-  toEnum 4 = TF_UINT8
-  toEnum 5 = TF_INT16
-  toEnum 6 = TF_INT8
-  toEnum 7 = TF_STRING
-  toEnum 8 = TF_COMPLEX64
-  toEnum 9 = TF_INT64
-  toEnum 10 = TF_BOOL
-  toEnum 11 = TF_QINT8
-  toEnum 12 = TF_QUINT8
-  toEnum 13 = TF_QINT32
-  toEnum 14 = TF_BFLOAT16
-  toEnum 15 = TF_QINT16
-  toEnum 16 = TF_QUINT16
-  toEnum 17 = TF_UINT16
-  toEnum 18 = TF_COMPLEX128
-  toEnum 19 = TF_HALF
-  toEnum 20 = TF_RESOURCE
-  toEnum unmatched = error ("DataType.toEnum: Cannot match " ++ show unmatched)
-
-{-# LINE 24 "src/TensorFlow/Internal/Raw.chs" #-}
-
-data Code = TF_OK
-          | TF_CANCELLED
-          | TF_UNKNOWN
-          | TF_INVALID_ARGUMENT
-          | TF_DEADLINE_EXCEEDED
-          | TF_NOT_FOUND
-          | TF_ALREADY_EXISTS
-          | TF_PERMISSION_DENIED
-          | TF_RESOURCE_EXHAUSTED
-          | TF_FAILED_PRECONDITION
-          | TF_ABORTED
-          | TF_OUT_OF_RANGE
-          | TF_UNIMPLEMENTED
-          | TF_INTERNAL
-          | TF_UNAVAILABLE
-          | TF_DATA_LOSS
-          | TF_UNAUTHENTICATED
-  deriving (Show,Eq)
-instance Enum Code where
-  succ TF_OK = TF_CANCELLED
-  succ TF_CANCELLED = TF_UNKNOWN
-  succ TF_UNKNOWN = TF_INVALID_ARGUMENT
-  succ TF_INVALID_ARGUMENT = TF_DEADLINE_EXCEEDED
-  succ TF_DEADLINE_EXCEEDED = TF_NOT_FOUND
-  succ TF_NOT_FOUND = TF_ALREADY_EXISTS
-  succ TF_ALREADY_EXISTS = TF_PERMISSION_DENIED
-  succ TF_PERMISSION_DENIED = TF_RESOURCE_EXHAUSTED
-  succ TF_RESOURCE_EXHAUSTED = TF_FAILED_PRECONDITION
-  succ TF_FAILED_PRECONDITION = TF_ABORTED
-  succ TF_ABORTED = TF_OUT_OF_RANGE
-  succ TF_OUT_OF_RANGE = TF_UNIMPLEMENTED
-  succ TF_UNIMPLEMENTED = TF_INTERNAL
-  succ TF_INTERNAL = TF_UNAVAILABLE
-  succ TF_UNAVAILABLE = TF_DATA_LOSS
-  succ TF_DATA_LOSS = TF_UNAUTHENTICATED
-  succ TF_UNAUTHENTICATED = error "Code.succ: TF_UNAUTHENTICATED has no successor"
-
-  pred TF_CANCELLED = TF_OK
-  pred TF_UNKNOWN = TF_CANCELLED
-  pred TF_INVALID_ARGUMENT = TF_UNKNOWN
-  pred TF_DEADLINE_EXCEEDED = TF_INVALID_ARGUMENT
-  pred TF_NOT_FOUND = TF_DEADLINE_EXCEEDED
-  pred TF_ALREADY_EXISTS = TF_NOT_FOUND
-  pred TF_PERMISSION_DENIED = TF_ALREADY_EXISTS
-  pred TF_RESOURCE_EXHAUSTED = TF_PERMISSION_DENIED
-  pred TF_FAILED_PRECONDITION = TF_RESOURCE_EXHAUSTED
-  pred TF_ABORTED = TF_FAILED_PRECONDITION
-  pred TF_OUT_OF_RANGE = TF_ABORTED
-  pred TF_UNIMPLEMENTED = TF_OUT_OF_RANGE
-  pred TF_INTERNAL = TF_UNIMPLEMENTED
-  pred TF_UNAVAILABLE = TF_INTERNAL
-  pred TF_DATA_LOSS = TF_UNAVAILABLE
-  pred TF_UNAUTHENTICATED = TF_DATA_LOSS
-  pred TF_OK = error "Code.pred: TF_OK has no predecessor"
-
-  enumFromTo from to = go from
-    where
-      end = fromEnum to
-      go v = case compare (fromEnum v) end of
-                 LT -> v : go (succ v)
-                 EQ -> [v]
-                 GT -> []
-
-  enumFrom from = enumFromTo from TF_UNAUTHENTICATED
-
-  fromEnum TF_OK = 0
-  fromEnum TF_CANCELLED = 1
-  fromEnum TF_UNKNOWN = 2
-  fromEnum TF_INVALID_ARGUMENT = 3
-  fromEnum TF_DEADLINE_EXCEEDED = 4
-  fromEnum TF_NOT_FOUND = 5
-  fromEnum TF_ALREADY_EXISTS = 6
-  fromEnum TF_PERMISSION_DENIED = 7
-  fromEnum TF_RESOURCE_EXHAUSTED = 8
-  fromEnum TF_FAILED_PRECONDITION = 9
-  fromEnum TF_ABORTED = 10
-  fromEnum TF_OUT_OF_RANGE = 11
-  fromEnum TF_UNIMPLEMENTED = 12
-  fromEnum TF_INTERNAL = 13
-  fromEnum TF_UNAVAILABLE = 14
-  fromEnum TF_DATA_LOSS = 15
-  fromEnum TF_UNAUTHENTICATED = 16
-
-  toEnum 0 = TF_OK
-  toEnum 1 = TF_CANCELLED
-  toEnum 2 = TF_UNKNOWN
-  toEnum 3 = TF_INVALID_ARGUMENT
-  toEnum 4 = TF_DEADLINE_EXCEEDED
-  toEnum 5 = TF_NOT_FOUND
-  toEnum 6 = TF_ALREADY_EXISTS
-  toEnum 7 = TF_PERMISSION_DENIED
-  toEnum 8 = TF_RESOURCE_EXHAUSTED
-  toEnum 9 = TF_FAILED_PRECONDITION
-  toEnum 10 = TF_ABORTED
-  toEnum 11 = TF_OUT_OF_RANGE
-  toEnum 12 = TF_UNIMPLEMENTED
-  toEnum 13 = TF_INTERNAL
-  toEnum 14 = TF_UNAVAILABLE
-  toEnum 15 = TF_DATA_LOSS
-  toEnum 16 = TF_UNAUTHENTICATED
-  toEnum unmatched = error ("Code.toEnum: Cannot match " ++ show unmatched)
-
-{-# LINE 25 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-
--- Status.
-newtype Status = Status (C2HSImp.Ptr (Status))
-{-# LINE 29 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-newStatus :: IO Status
-newStatus = tFNewStatus
-{-# LINE 32 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-deleteStatus :: Status -> IO ()
-deleteStatus = tFDeleteStatus
-{-# LINE 35 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-setStatus :: Status -> Code -> CString -> IO ()
-setStatus s c = tFSetStatus s (fromIntegral $ fromEnum c)
-
-getCode :: Status -> IO Code
-getCode s = toEnum . fromIntegral <$> tFGetCode s
-
-message :: Status -> IO CString
-message = tFMessage
-{-# LINE 44 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-
--- Buffer.
-data Buffer
-type BufferPtr = C2HSImp.Ptr (Buffer)
-{-# LINE 49 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-getBufferData :: BufferPtr -> IO (Ptr ())
-getBufferData = (\ptr -> do {C2HSImp.peekByteOff ptr 0 :: IO (C2HSImp.Ptr ())})
-{-# LINE 52 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-getBufferLength :: BufferPtr -> IO CULong
-getBufferLength =(\ptr -> do {C2HSImp.peekByteOff ptr 8 :: IO C2HSImp.CULong})
-{-# LINE 55 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
--- Tensor.
-newtype Tensor = Tensor (C2HSImp.Ptr (Tensor))
-{-# LINE 58 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-instance Storable Tensor where
-    sizeOf (Tensor t) = sizeOf t
-    alignment (Tensor t) = alignment t
-    peek p = fmap Tensor (peek (castPtr p))
-    poke p (Tensor t) = poke (castPtr p) t
-
--- A synonym for the int64_t type, which is used in the TensorFlow API.
--- On some platforms it's `long`; on others (e.g., Mac OS X) it's `long long`;
--- and as far as Haskell is concerned, those are distinct types (`CLong` vs
--- `CLLong`).
-type CInt64 = (C2HSImp.CLLong)
-{-# LINE 70 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-newTensor :: DataType
-          -> Ptr CInt64   -- dimensions array
-          -> CInt         -- num dimensions
-          -> Ptr ()       -- data
-          -> CULong       -- data len
-          -> FunPtr (Ptr () -> CULong -> Ptr () -> IO ())  -- deallocator
-          -> Ptr ()       -- deallocator arg
-          -> IO Tensor
-newTensor dt = tFNewTensor (fromIntegral $ fromEnum dt)
-
-deleteTensor :: Tensor -> IO ()
-deleteTensor = tFDeleteTensor
-{-# LINE 83 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-tensorType :: Tensor -> IO DataType
-tensorType t = toEnum . fromIntegral <$> tFTensorType t
-
-numDims :: Tensor -> IO CInt
-numDims = tFNumDims
-{-# LINE 89 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-dim :: Tensor -> CInt -> IO CInt64
-dim = tFDim
-{-# LINE 92 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-tensorByteSize :: Tensor -> IO CULong
-tensorByteSize = tFTensorByteSize
-{-# LINE 95 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-tensorData :: Tensor -> IO (Ptr ())
-tensorData = tFTensorData
-{-# LINE 98 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-
--- Session Options.
-newtype SessionOptions = SessionOptions (C2HSImp.Ptr (SessionOptions))
-{-# LINE 102 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-newSessionOptions :: IO SessionOptions
-newSessionOptions = tFNewSessionOptions
-{-# LINE 105 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-setTarget :: SessionOptions -> CString -> IO ()
-setTarget = tFSetTarget
-{-# LINE 108 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-setConfig :: SessionOptions -> Ptr () -> CULong -> Status -> IO ()
-setConfig = tFSetConfig
-{-# LINE 111 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-deleteSessionOptions :: SessionOptions -> IO ()
-deleteSessionOptions = tFDeleteSessionOptions
-{-# LINE 114 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-
--- Session.
-newtype Session = Session (C2HSImp.Ptr (Session))
-{-# LINE 118 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-newSession :: SessionOptions -> Status -> IO Session
-newSession = tFNewDeprecatedSession
-{-# LINE 121 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-closeSession :: Session -> Status -> IO ()
-closeSession = tFCloseDeprecatedSession
-{-# LINE 124 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-deleteSession :: Session -> Status -> IO ()
-deleteSession = tFDeleteDeprecatedSession
-{-# LINE 127 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-extendGraph :: Session -> Ptr () -> CULong -> Status -> IO ()
-extendGraph = tFExtendGraph
-{-# LINE 130 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-run :: Session
-    -> BufferPtr                          -- RunOptions proto.
-    -> Ptr CString -> Ptr Tensor -> CInt  -- Input (names, tensors, count).
-    -> Ptr CString -> Ptr Tensor -> CInt  -- Output (names, tensors, count).
-    -> Ptr CString -> CInt                -- Target nodes (names, count).
-    -> BufferPtr                          -- RunMetadata proto.
-    -> Status
-    -> IO ()
-run = tFRun
-{-# LINE 140 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
--- FFI helpers.
-type TensorDeallocFn = Ptr () -> CULong -> Ptr () -> IO ()
-foreign import ccall "wrapper"
-    wrapTensorDealloc :: TensorDeallocFn -> IO (FunPtr TensorDeallocFn)
-
-
--- | Get the OpList of all OpDefs defined in this address space.
--- Returns a BufferPtr, ownership of which is transferred to the caller
--- (and can be freed using deleteBuffer).
---
--- The data in the buffer will be the serialized OpList proto for ops registered
--- in this address space.
-getAllOpList :: IO BufferPtr
-getAllOpList = tFGetAllOpList
-{-# LINE 155 "src/TensorFlow/Internal/Raw.chs" #-}
-
-
-foreign import ccall "&TF_DeleteBuffer"
-  deleteBuffer :: FunPtr (BufferPtr -> IO ())
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewStatus"
-  tFNewStatus :: (IO (Status))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteStatus"
-  tFDeleteStatus :: ((Status) -> (IO ()))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetStatus"
-  tFSetStatus :: ((Status) -> (C2HSImp.CInt -> ((C2HSImp.Ptr C2HSImp.CChar) -> (IO ()))))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_GetCode"
-  tFGetCode :: ((Status) -> (IO C2HSImp.CInt))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Message"
-  tFMessage :: ((Status) -> (IO (C2HSImp.Ptr C2HSImp.CChar)))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewTensor"
-  tFNewTensor :: (C2HSImp.CInt -> ((C2HSImp.Ptr C2HSImp.CLLong) -> (C2HSImp.CInt -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((C2HSImp.FunPtr ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((C2HSImp.Ptr ()) -> (IO ()))))) -> ((C2HSImp.Ptr ()) -> (IO (Tensor)))))))))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteTensor"
-  tFDeleteTensor :: ((Tensor) -> (IO ()))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorType"
-  tFTensorType :: ((Tensor) -> (IO C2HSImp.CInt))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NumDims"
-  tFNumDims :: ((Tensor) -> (IO C2HSImp.CInt))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Dim"
-  tFDim :: ((Tensor) -> (C2HSImp.CInt -> (IO C2HSImp.CLLong)))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorByteSize"
-  tFTensorByteSize :: ((Tensor) -> (IO C2HSImp.CULong))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_TensorData"
-  tFTensorData :: ((Tensor) -> (IO (C2HSImp.Ptr ())))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewSessionOptions"
-  tFNewSessionOptions :: (IO (SessionOptions))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetTarget"
-  tFSetTarget :: ((SessionOptions) -> ((C2HSImp.Ptr C2HSImp.CChar) -> (IO ())))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_SetConfig"
-  tFSetConfig :: ((SessionOptions) -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((Status) -> (IO ())))))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteSessionOptions"
-  tFDeleteSessionOptions :: ((SessionOptions) -> (IO ()))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_NewDeprecatedSession"
-  tFNewDeprecatedSession :: ((SessionOptions) -> ((Status) -> (IO (Session))))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_CloseDeprecatedSession"
-  tFCloseDeprecatedSession :: ((Session) -> ((Status) -> (IO ())))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_DeleteDeprecatedSession"
-  tFDeleteDeprecatedSession :: ((Session) -> ((Status) -> (IO ())))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_ExtendGraph"
-  tFExtendGraph :: ((Session) -> ((C2HSImp.Ptr ()) -> (C2HSImp.CULong -> ((Status) -> (IO ())))))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_Run"
-  tFRun :: ((Session) -> ((BufferPtr) -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> ((C2HSImp.Ptr (Tensor)) -> (C2HSImp.CInt -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> ((C2HSImp.Ptr (Tensor)) -> (C2HSImp.CInt -> ((C2HSImp.Ptr (C2HSImp.Ptr C2HSImp.CChar)) -> (C2HSImp.CInt -> ((BufferPtr) -> ((Status) -> (IO ())))))))))))))
-
-foreign import ccall safe "TensorFlow/Internal/Raw.chs.h TF_GetAllOpList"
-  tFGetAllOpList :: (IO (BufferPtr))
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-VarInt.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-VarInt.html deleted file mode 100644 index 3f4826d..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Internal-VarInt.html +++ /dev/null @@ -1,61 +0,0 @@ - - - - - -src/TensorFlow/Internal/VarInt.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE BangPatterns #-}
-
-{-|
-Module      : TensorFlow.Internal.VarInt
-Description : Encoders and decoders for varint types.
-
-Originally taken from internal proto-lens code.
--}
-module TensorFlow.Internal.VarInt
-    ( getVarInt
-    , putVarInt
-    ) where
-
-import Data.Attoparsec.ByteString as Parse
-import Data.Bits
-import Data.ByteString.Lazy.Builder as Builder
-import Data.Monoid ((<>))
-import Data.Word (Word64)
-
--- | Decode an unsigned varint.
-getVarInt :: Parser Word64
-getVarInt = loop 1 0
-  where
-    loop !s !n = do
-        b <- anyWord8
-        let n' = n + s * fromIntegral (b .&. 127)
-        if (b .&. 128) == 0
-            then return n'
-            else loop (128*s) n'
-
--- | Encode a Word64.
-putVarInt :: Word64 -> Builder
-putVarInt n
-    | n < 128 = Builder.word8 (fromIntegral n)
-    | otherwise = Builder.word8 (fromIntegral $ n .&. 127 .|. 128)
-                      <> putVarInt (n `shiftR` 7)
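-
--- A worked example: 300 is 0b100101100, so 'putVarInt' emits two bytes:
--- 0xAC (the low seven bits 0101100 with the continuation bit set), then
--- 0x02 (the remaining bits 10).  'getVarInt' reverses this, accumulating
--- 44 + 128 * 2 == 300.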
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Nodes.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Nodes.html deleted file mode 100644 index c5a5d69..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Nodes.html +++ /dev/null @@ -1,152 +0,0 @@ - - - - - -src/TensorFlow/Nodes.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE FlexibleInstances #-}
-{-# LANGUAGE GeneralizedNewtypeDeriving #-}
-{-# LANGUAGE MultiParamTypeClasses #-}
-{-# LANGUAGE RankNTypes #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# LANGUAGE TypeFamilies #-}
-module TensorFlow.Nodes where
-
-import Control.Applicative (liftA2, liftA3)
-import Data.Map.Strict (Map)
-import Data.Monoid ((<>))
-import Data.Set (Set)
-import Data.String (IsString)
-import Data.Text (Text)
-import Lens.Family2 ((^.))
-import qualified Data.Map.Strict as Map
-import qualified Data.Set as Set
-import qualified Data.Vector as V
-
-import TensorFlow.Build
-import TensorFlow.Output
-import TensorFlow.Tensor
-import TensorFlow.Types
-import qualified TensorFlow.Internal.FFI as FFI
-
--- | Types that contain ops which can be run.
-class Nodes t where
-    getNodes :: t -> Build (Set NodeName)
-
--- | Types that tensor representations (e.g. 'Tensor', 'ControlNode') can be
--- fetched into.
---
--- Includes collections of tensors (e.g. tuples).
-class Nodes t => Fetchable t a where
-    getFetch :: t -> Build (Fetch a)
-
--- | Fetch action. Keeps track of what needs to be fetched and how to decode
--- the fetched data.
-data Fetch a = Fetch
-          { -- | Nodes to fetch
-            fetches :: Set Text
-            -- | Function to create an 'a' from the fetched data.
-          , fetchRestore :: Map Text FFI.TensorData -> a
-          }
-
-instance Functor Fetch where
-    fmap f (Fetch fetch restore) = Fetch fetch (f . restore)
-
-instance Applicative Fetch where
-    pure x = Fetch Set.empty (const x)
-    Fetch fetch restore <*> Fetch fetch' restore' =
-        Fetch (fetch <> fetch') (restore <*> restore')
-
-nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b
-nodesUnion = fmap (foldMap id) . sequenceA
-
-instance (Nodes t1, Nodes t2) => Nodes (t1, t2) where
-    getNodes (x, y) = nodesUnion [getNodes x, getNodes y]
-
-instance (Nodes t1, Nodes t2, Nodes t3) => Nodes (t1, t2, t3) where
-    getNodes (x, y, z) = nodesUnion [getNodes x, getNodes y, getNodes z]
-
-instance (Fetchable t1 a1, Fetchable t2 a2) => Fetchable (t1, t2) (a1, a2) where
-    getFetch (x, y) = liftA2 (,) <$> getFetch x <*> getFetch y
-
-instance (Fetchable t1 a1, Fetchable t2 a2, Fetchable t3 a3)
-         => Fetchable (t1, t2, t3) (a1, a2, a3) where
-    getFetch (x, y, z) =
-        liftA3 (,,) <$> getFetch x <*> getFetch y <*> getFetch z
-
-instance Nodes t => Nodes [t] where
-    getNodes = nodesUnion . map getNodes
-
-instance Fetchable t a => Fetchable [t] [a] where
-    getFetch ts  = sequenceA <$> mapM getFetch ts
-
-instance Nodes ControlNode where
-    getNodes (ControlNode o) = Set.singleton <$> getOrAddOp o
-
--- We use the constraint @(a ~ ())@ to help with type inference.  For example,
--- if @t :: ControlNode@, then this constraint ensures that @run t :: Session
--- ()@.  If we used @instance Fetchable ControlNode ()@ instead, then that
--- expression would be ambiguous without explicitly specifying the return type.
-instance a ~ () => Fetchable ControlNode a where
-    getFetch _ = return $ pure ()
-
-instance Nodes (Tensor v a) where
-    getNodes t = Set.singleton <$> getOrAddOp (t ^. tensorOutput . outputOp)
-
-fetchTensorList :: TensorType a => Tensor v a -> Build (Fetch (Shape, [a]))
-fetchTensorList t = fmap (fmap V.toList) <$> fetchTensorVector t
-
-fetchTensorVector :: forall a v . TensorType a
-                  => Tensor v a -> Build (Fetch (Shape, V.Vector a))
-fetchTensorVector (Tensor _ o) = do
-    outputName <- renderOutput o
-    return $ Fetch (Set.singleton outputName) $ \tensors ->
-        let tensorData = tensors Map.! outputName
-            shape = Shape $ FFI.tensorDataDimensions tensorData
-            vec = decodeTensorData $ TensorData tensorData
-
-            expectedType = tensorType (undefined :: a)
-            actualType = FFI.tensorDataType tensorData
-            badTypeError = error $ "Bad tensor type: expected "
-                                   ++ show expectedType
-                                   ++ ", got "
-                                   ++ show actualType
-        in if expectedType /= actualType
-               then badTypeError
-               else (shape, vec)
-
--- The constraint "a ~ a'" means that the input/output of fetch can constrain
--- the TensorType of each other.
-instance (TensorType a, a ~ a') => Fetchable (Tensor v a) (V.Vector a') where
-    getFetch t = fmap snd <$> fetchTensorVector t
-
-newtype Scalar a = Scalar {unScalar :: a}
-    deriving (Show, Eq, Ord, Num, Fractional, Floating, Real, RealFloat,
-              RealFrac, IsString)
-
-instance (TensorType a, a ~ a') => Fetchable (Tensor v a) (Scalar a') where
-    getFetch t = fmap (Scalar . headFromSingleton . snd) <$> fetchTensorList t
-      where
-        headFromSingleton [x] = x
-        headFromSingleton xs
-            = error $ "Unable to extract singleton from tensor of length "
-                          ++ show (length xs)
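-
--- A hedged usage sketch (relies on 'run' from TensorFlow.Session and a
--- rank-0 tensor @t@): @Scalar x <- run t@ fetches the tensor and unwraps
--- its single element, erroring out if it does not hold exactly one element.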
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Orphans.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Orphans.html deleted file mode 100644 index a0f10f0..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Orphans.html +++ /dev/null @@ -1,57 +0,0 @@ - - - - - -src/TensorFlow/Orphans.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-
-{-# LANGUAGE StandaloneDeriving #-}
-{-# OPTIONS_GHC -fno-warn-orphans #-}
--- Orphan instances for certain proto messages/enums, used internally.
--- TODO(judahjacobson): consider making proto-lens generate some or all of
--- these automatically; or, alternately, make new Haskell datatypes.
-module TensorFlow.Orphans() where
-
-import Proto.Tensorflow.Core.Framework.AttrValue
-    ( AttrValue(..)
-    , AttrValue'ListValue(..)
-    , NameAttrList(..)
-    )
-import Proto.Tensorflow.Core.Framework.NodeDef
-    ( NodeDef(..))
-import Proto.Tensorflow.Core.Framework.ResourceHandle
-    ( ResourceHandle(..))
-import Proto.Tensorflow.Core.Framework.Tensor
-    (TensorProto(..))
-import Proto.Tensorflow.Core.Framework.TensorShape
-    (TensorShapeProto(..), TensorShapeProto'Dim(..))
-import Proto.Tensorflow.Core.Framework.Types (DataType(..))
-
-deriving instance Ord AttrValue
-deriving instance Ord AttrValue'ListValue
-deriving instance Ord DataType
-deriving instance Ord NameAttrList
-deriving instance Ord NodeDef
-deriving instance Ord ResourceHandle
-deriving instance Ord TensorProto
-deriving instance Ord TensorShapeProto
-deriving instance Ord TensorShapeProto'Dim
-
- diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Output.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Output.html deleted file mode 100644 index 66be30e..0000000 --- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Output.html +++ /dev/null @@ -1,174 +0,0 @@ - - - - - -src/TensorFlow/Output.hs - - - -
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE GeneralizedNewtypeDeriving #-}
-{-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE Rank2Types #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-
-module TensorFlow.Output
-    ( ControlNode(..)
-    , Device(..)
-    -- * Ops
-    , NodeName(..)
-    , Op(..)
-    , opUnrendered
-    , OpDef(..)
-    , opName
-    , opType
-    , opAttr
-    , opInputs
-    , opControlInputs
-    , OpType(..)
-    , OutputIx(..)
-    , Output(..)
-    , output
-    , outputIndex
-    , outputOp
-    , PendingNodeName(..)
-    , ResourceHandle(..)
-    )  where
-
-import qualified Data.Map.Strict as Map
-import Data.ProtoLens.TextFormat (showMessage)
-import Data.String (IsString(..))
-import Data.Text (Text)
-import qualified Data.Text as Text
-import Lens.Family2 (Lens', Traversal', (.~), (&), (^.))
-import Lens.Family2.Unchecked (lens)
-import Proto.Tensorflow.Core.Framework.AttrValue (AttrValue(..))
-import Proto.Tensorflow.Core.Framework.NodeDef (NodeDef(..), name)
-import Data.Default (def)
-import TensorFlow.Types (Attribute, attrLens)
-import TensorFlow.Orphans ()
-
--- | A type of graph node which has no outputs. These nodes are
--- valuable for causing side effects when they are run.
-newtype ControlNode = ControlNode { unControlNode :: Op }
-
--- | The type of op of a node in the graph.  This corresponds to the proto field
--- NodeDef.op.
-newtype OpType = OpType { unOpType :: Text }
-    deriving (Eq, Ord, Show)
-
-instance IsString OpType where
-    fromString = OpType . Text.pack
-
--- | An output of a TensorFlow node.
-data Output = Output !OutputIx !Op
-    deriving (Eq, Ord, Show)
-
-output :: OutputIx -> Op -> Output
-output = Output
-
-outputOp :: Lens' Output Op
-outputOp = lens (\(Output _ o) -> o) (\(Output i _) o -> Output i o)
-
-outputIndex :: Lens' Output OutputIx
-outputIndex = lens (\(Output i _) -> i) (\(Output _ o) i -> Output i o)
-
-newtype OutputIx = OutputIx { unOutputIx :: Int }
-    deriving (Eq, Ord, Num, Enum, Show)
-
--- | A device that a node can be assigned to.
--- There's a naming convention where the device names
--- are constructed from job and replica names.
-newtype Device = Device {deviceName :: Text}
-    deriving (Eq, Ord, IsString)
-
-instance Show Device where
-    show (Device d) = show d
-
--- | The representation of a node in a TensorFlow graph.
-data Op
-    = Rendered !NodeDef  -- ^ Properties are fixed, including the
-                         -- device, name, and scope.
-    | Unrendered !OpDef  -- ^ Properties are not fixed, and may change depending
-                         -- on which context this op is rendered in.
-    deriving (Eq, Ord)
-
-instance Show Op where
-    show (Rendered n) = "Rendered " ++ showMessage n
-    show (Unrendered o) = "Unrendered " ++ show (o ^. opName)
-
--- | Traverse on the 'Unrendered' of an 'Op'.
---
--- Same implementation as _Left.
-opUnrendered :: Traversal' Op OpDef
-opUnrendered f (Unrendered a) = Unrendered <$> f a
-opUnrendered _ (Rendered b) = pure (Rendered b)
-
--- | Op definition. This corresponds somewhat to the 'NodeDef' proto.
-data OpDef = OpDef
-    { _opName :: !PendingNodeName
-    , _opType :: !OpType
-    , _opAttrs :: !(Map.Map Text AttrValue)
-    , _opInputs :: [Output]
-    , _opControlInputs :: [NodeName]
-    }  deriving (Eq, Ord)
-
--- | The name specified for an unrendered Op.  If an Op has an
--- ImplicitName, it will be assigned based on the opType plus a
--- unique identifier.  Does not contain the "scope" prefix.
-data PendingNodeName = ExplicitName !Text | ImplicitName
-    deriving (Eq, Ord, Show)
-
--- | The name of a node in the graph.  This corresponds to the proto field
--- NodeDef.name.  Includes the scope prefix (if any) and a unique identifier
--- (if the node was implicitly named).
-newtype NodeName = NodeName { unNodeName :: Text }
-    deriving (Eq, Ord, Show)
-
-opName :: Lens' OpDef PendingNodeName
-opName = lens _opName (\o x -> o {_opName = x})
-
-opType :: Lens' OpDef OpType
-opType = lens _opType (\o x -> o { _opType = x})
-
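--- | Lens into a single named attribute of an op definition.
---
--- For example, with OverloadedStrings the "dtype" attribute can be set as
--- in this sketch (assumes 'DT_FLOAT' from
--- Proto.Tensorflow.Core.Framework.Types is in scope):
---
--- > OpDef ImplicitName "Placeholder" Map.empty [] []
--- >     & opAttr "dtype" .~ DT_FLOAT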
-opAttr :: Attribute a => Text -> Lens' OpDef a
-opAttr n = lens _opAttrs (\o x -> o {_opAttrs = x})
-              . lens (Map.findWithDefault def n) (flip (Map.insert n))
-              . attrLens
-
-opInputs :: Lens' OpDef [Output]
-opInputs = lens _opInputs (\o x -> o {_opInputs = x})
-
-opControlInputs :: Lens' OpDef [NodeName]
-opControlInputs = lens _opControlInputs (\o x -> o {_opControlInputs = x})
-
--- TODO(gnezdo): IsString instance is weird and we should move that
--- code into a Build function
-instance IsString Output where
-    fromString s = case break (==':') s of
-        (n, ':':ixStr) | [(ix, "" :: String)] <- reads ixStr
-                         -> Output (fromInteger ix) $ assigned n
-        _ -> Output 0 $ assigned s
-        where assigned n = Rendered $ def & name .~ Text.pack n
-
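--- For example, with OverloadedStrings the instance above makes these
--- equivalent (a sketch):
---
--- > "foo:1" == Output 1 (Rendered (def & name .~ "foo"))
--- > "foo"   == Output 0 (Rendered (def & name .~ "foo"))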
-
--- | Opaque handle to a mutable resource in the graph.  Typical such
--- resources are variables. The type parameter corresponds to the
--- dtype of the tensor held in the variable.
-newtype ResourceHandle a = ResourceHandle Output
-
-
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Session.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Session.html
deleted file mode 100644
index 1cf190c..0000000
--- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Session.html
+++ /dev/null
@@ -1,240 +0,0 @@
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE GeneralizedNewtypeDeriving #-}
-{-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE Rank2Types #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# LANGUAGE TupleSections #-}
-
-module TensorFlow.Session (
-    Session,
-    Options,
-    sessionConfig,
-    sessionTarget,
-    sessionTracer,
-    runSession,
-    runSessionWithOptions,
-    build,
-    buildAnd,
-    buildWithSummary,
-    extend,
-    addGraphDef,
-    run,
-    runWithFeeds,
-    run_,
-    runWithFeeds_,
-    asyncProdNodes,
-    ) where
-
-import Control.Monad (forever, unless, void)
-import Control.Monad.IO.Class (MonadIO, liftIO)
-import Control.Monad.Trans.Class (lift)
-import Control.Monad.Trans.Reader (ReaderT(..), ask, asks)
-import Data.ByteString (ByteString)
-import Data.Default (Default, def)
-import Data.Functor.Identity (runIdentity)
-import Data.Monoid ((<>))
-import Data.ProtoLens (showMessage)
-import Data.Set (Set)
-import Data.Text.Encoding (encodeUtf8)
-import Lens.Family2 (Lens', (^.), (&), (.~))
-import Lens.Family2.Unchecked (lens)
-import Proto.Tensorflow.Core.Framework.Graph (node)
-import Proto.Tensorflow.Core.Protobuf.Config (ConfigProto)
-import TensorFlow.Build
-import TensorFlow.Nodes
-import TensorFlow.Output (NodeName, unNodeName)
-import TensorFlow.Tensor
-
-import qualified Data.ByteString.Builder as Builder
-import qualified Data.Map.Strict as Map
-import qualified Data.Set as Set
-import qualified TensorFlow.Internal.FFI as FFI
-
--- | An action for logging.
-type Tracer = Builder.Builder -> IO ()
-
--- Common state threaded through the session.
-data SessionState
-    = SessionState {
-          rawSession :: FFI.Session
-        , asyncCollector :: IO () -> IO ()
-          -- ^ Starts the given action concurrently.
-        , tracer :: Tracer
-        }
-
-newtype Session a
-    = Session (ReaderT SessionState (BuildT IO) a)
-    deriving (Functor, Applicative, Monad, MonadIO)
-
--- | Run 'Session' actions in a new TensorFlow session.
-runSession :: Session a -> IO a
-runSession = runSessionWithOptions def
-
--- | Customization options for a session. Use the lenses to update:
--- 'sessionTarget', 'sessionTracer', 'sessionConfig'.
-data Options = Options
-    { _sessionTarget :: ByteString
-    , _sessionConfig :: ConfigProto
-    , _sessionTracer :: Tracer
-    }
-
-instance Default Options where
-    def = Options
-          { _sessionTarget = ""
-          , _sessionConfig = def
-          , _sessionTracer = const (return ())
-          }
-
--- | Target can be: "local", ip:port, host:port.
--- The set of supported factories depends on the linked-in libraries.
-sessionTarget :: Lens' Options ByteString
-sessionTarget = lens _sessionTarget (\g x -> g { _sessionTarget = x })
-
--- | Uses the specified config for the created session.
-sessionConfig :: Lens' Options ConfigProto
-sessionConfig = lens _sessionConfig (\g x -> g { _sessionConfig = x })
-
--- | Uses the given logger to monitor session progress.
-sessionTracer :: Lens' Options Tracer
-sessionTracer = lens _sessionTracer (\g x -> g { _sessionTracer = x })
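-
--- For example, to point a session at a remote target and trace its
--- progress to stderr (the address is illustrative; 'stderr' is
--- System.IO.stderr, and 'actions' is the 'Session' block to run):
---
--- > runSessionWithOptions
--- >     (def & sessionTarget .~ "localhost:2222"
--- >          & sessionTracer .~ Builder.hPutBuilder stderr)
--- >     actions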
-
--- | Run 'Session' actions in a new TensorFlow session created with
--- the given option setter actions ('sessionTarget', 'sessionConfig').
-runSessionWithOptions :: Options -> Session a -> IO a
-runSessionWithOptions options (Session m) =
-    FFI.withSession applyOptions $
-        \as rs ->
-            let initState = SessionState rs as (options ^. sessionTracer)
-            in evalBuildT (runReaderT m initState)
-  where applyOptions opt = do
-            FFI.setSessionTarget (options ^. sessionTarget) opt
-            FFI.setSessionConfig (options ^. sessionConfig) opt
-
--- | Lift a 'Build' action into a 'Session', including any explicit op
--- renderings.
-build :: Build a -> Session a
-build = Session . lift . hoistBuildT (return . runIdentity)
-
--- | Lift a 'Build' action into a 'Session', including any explicit op
--- renderings. Returns the merged summary ops which can be used for
--- logging, see 'TensorFlow.Logging.build' for a convenient wrapper.
-buildWithSummary :: forall a . Build a -> Session (a, [SummaryTensor])
-buildWithSummary b = Session $ lift $ (,) <$> v <*> collectAllSummaries
-  where v :: BuildT IO a
-        v = hoistBuildT (return . runIdentity) b
-
--- | Adds all pending rendered nodes to the TensorFlow graph and runs
--- any pending initializers.
---
--- Note that run, runWithFeeds, etc. will all call this function implicitly.
-extend :: Session ()
-extend = do
-    session <- Session (asks rawSession)
-    trace <- Session (asks tracer)
-    nodesToExtend <- build flushNodeBuffer
-    unless (null nodesToExtend) $ liftIO $ do
-        let graphDef = def & node .~ nodesToExtend
-        trace ("Session.extend " <> Builder.string8 (showMessage graphDef))
-        FFI.extendGraph session graphDef
-    -- Now that all the nodes are created, run the initializers.
-    initializers <- build flushInitializers
-    unless (null initializers) $
-        void $ liftIO $ FFI.run session [] [] (toNodeNames initializers)
-
--- | Helper combinator for doing something with the result of a 'Build' action.
--- Example usage:
---
--- > buildAnd run :: Fetchable t a => Build t -> Session a
-buildAnd :: (a -> Session b) -> Build a -> Session b
-buildAnd f m = build m >>= f
-
--- | Run a subgraph 't', rendering any dependent nodes that aren't already
--- rendered, and fetch the corresponding values for 'a'.
-run :: Fetchable t a => t -> Session a
-run = runWithFeeds []
-
--- | Run a subgraph 't', rendering any dependent nodes that aren't already
--- rendered, feed the given input values, and fetch the corresponding result
--- values for 'a'.
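---
--- A sketch of typical usage, assuming the graph already contains a
--- placeholder node named "x" ('V' is Data.Vector):
---
--- > let x = tensorFromName ValueKind "x" :: Tensor Value Float
--- >     d = encodeTensorData (Shape [2]) (V.fromList [1, 2])
--- > v <- runWithFeeds [feed x d] x :: Session (V.Vector Float)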
-runWithFeeds :: Fetchable t a => [Feed] -> t -> Session a
-runWithFeeds feeds t = do
-    ns <- build $ getNodes t
-    -- Note that this call to "fetch" shouldn't affect the following "extend"
-    -- call, since all nodes in t and its inputs/deps will be rendered by the
-    -- above call to getNodes.
-    fetch <- build $ getFetch t
-    runFetchWithFeeds feeds ns fetch
-
-runFetchWithFeeds :: [Feed] -> Set NodeName -> Fetch a -> Session a
-runFetchWithFeeds feeds target (Fetch fetch restore) = do
-    extend
-    feeds' <- build $ fixFeeds feeds
-    let fetchNames = encodeUtf8 <$> Set.toList fetch
-        targetNames = toNodeNames $ Set.toList target
-    session <- Session (asks rawSession)
-    runResult <- liftIO $ FFI.run session
-                                  feeds'
-                                  fetchNames
-                                  targetNames
-    let resultTensorsMap = Map.fromList $ zip (Set.toList fetch) runResult
-    return $ restore resultTensorsMap
-
-toNodeNames :: [NodeName] -> [ByteString]
-toNodeNames = map (encodeUtf8 . unNodeName)
-
--- | Run a subgraph 't', rendering and extending any dependent nodes that aren't
--- already rendered.  This behaves like 'run' except that it doesn't do any
--- fetches.
-run_ :: Nodes t => t -> Session ()
-run_ = runWithFeeds_ []
-
--- | Run a subgraph 't', rendering any dependent nodes that aren't already
--- rendered, and feed the given input values.  This behaves like
--- 'runWithFeeds' except that it doesn't do any fetches.
-runWithFeeds_ :: Nodes t => [Feed] -> t -> Session ()
-runWithFeeds_ feeds t = do
-    ns <- build $ getNodes t
-    runFetchWithFeeds feeds ns (pure ())
-
-fixFeeds :: [Feed] -> Build [(ByteString, FFI.TensorData)]
-fixFeeds = mapM $ \(Feed o d) -> (,d) . encodeUtf8 <$> renderOutput o
-
--- | Starts a concurrent thread which evaluates the given Nodes
--- forever until runSession exits or an exception occurs. Graph
--- extension happens synchronously, but the resultant run proceeds as
--- a separate thread.
-asyncProdNodes :: Nodes t
-                  => t  -- ^ Node to evaluate concurrently.
-                  -> Session ()
-asyncProdNodes nodes = do
-    target <- build (getNodes nodes)
-    extend
-    let targetNames = toNodeNames $ Set.toList target
-    state <- Session ask
-    let loop = forever (void (FFI.run (rawSession state) [] [] targetNames))
-    liftIO (asyncCollector state loop)
-
-
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Tensor.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Tensor.html
deleted file mode 100644
index 282d865..0000000
--- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Tensor.html
+++ /dev/null
@@ -1,96 +0,0 @@
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE FlexibleInstances #-}
-{-# LANGUAGE GADTs #-}
-{-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE Rank2Types #-}
-
-module TensorFlow.Tensor where
-
-import Data.String (IsString(..))
-import qualified Data.Text as Text
-import Lens.Family2 (Lens', Traversal')
-import Lens.Family2.Unchecked (lens)
-
-import TensorFlow.Output (Output, outputOp, opUnrendered, opAttr)
-import TensorFlow.Types (TensorData(..), Attribute)
-import qualified TensorFlow.Internal.FFI as FFI
-
--- | A named output of a TensorFlow operation.
---
--- The type parameter @a@ is the type of the elements in the 'Tensor'.  The
--- parameter @v@ is either 'Value' or 'Ref', depending on whether the graph is
--- treating this op output as an immutable 'Value' or a stateful 'Ref' (e.g., a
--- variable).  Note that a @Tensor Ref@ can be casted into a @Tensor Value@ via
--- 'value'.
-data Tensor v a = Tensor (TensorKind v) Output
-
-data Value
-data Ref
-
--- | This class provides a runtime switch on whether a 'Tensor' should be
--- treated as a 'Value' or as a 'Ref'.
-data TensorKind v where
-  ValueKind :: TensorKind Value
-  RefKind :: TensorKind Ref
-
-tensorKind :: Lens' (Tensor v a) (TensorKind v)
-tensorKind = lens (\(Tensor v _) -> v) (\(Tensor _ o) v -> Tensor v o)
-
-tensorOutput :: Lens' (Tensor v a) Output
-tensorOutput = lens (\(Tensor _ o) -> o) (\(Tensor v _) o -> Tensor v o)
-
--- TODO: Come up with a better API for handling attributes.
--- | Lens for the attributes of a tensor.
---
--- Only valid if the tensor has not yet been rendered. If the tensor has been
--- rendered, the traversal will be over nothing (nothing can be read or
--- written).
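---
--- For example, an unrendered tensor's "dtype" attribute could be set as
--- in this sketch ('&' and '.~' are from Lens.Family2):
---
--- > t & tensorAttr "dtype" .~ DT_FLOAT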
-tensorAttr :: Attribute attr => Text.Text -> Traversal' (Tensor v a) attr
-tensorAttr x = tensorOutput . outputOp . opUnrendered . opAttr x
-
--- | Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a
--- Ref into Value. This behaves like a no-op.
-value :: Tensor v a -> Tensor Value a
-value (Tensor _ o) = Tensor ValueKind o
-
--- | A pair of a 'Tensor' and some data that should be fed into that 'Tensor'
--- when running the graph.
-data Feed = Feed Output FFI.TensorData
-
--- | Create a 'Feed' for feeding the given data into a 'Tensor' when running
--- the graph.
---
--- Note that if a 'Tensor' is rendered, its identity may change; so feeding the
--- rendered 'Tensor' may be different than feeding the original 'Tensor'.
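---
--- A sketch, where @x@ is assumed to be a placeholder tensor:
---
--- > feed x (encodeTensorData (Shape [3]) (V.fromList [1, 2, 3 :: Float]))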
-feed :: Tensor v a -> TensorData a -> Feed
-feed (Tensor _ o) (TensorData td) = Feed o td
-
--- | Create a 'Tensor' for a given name.  This can be used to reference nodes
--- in a 'GraphDef' that was loaded via 'addGraphDef'.
--- TODO(judahjacobson): add more safety checks here.
-tensorFromName :: TensorKind v -> Text.Text -> Tensor v a
-tensorFromName v = Tensor v . fromString . Text.unpack
-
-
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Types.html b/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Types.html
deleted file mode 100644
index 0485fe3..0000000
--- a/docs/haddock/tensorflow-0.1.0.0/src/TensorFlow-Types.html
+++ /dev/null
@@ -1,393 +0,0 @@
--- Copyright 2016 TensorFlow authors.
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-{-# LANGUAGE ConstraintKinds #-}
-{-# LANGUAGE DataKinds #-}
-{-# LANGUAGE FlexibleContexts #-}
-{-# LANGUAGE FlexibleInstances #-}
-{-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE RankNTypes #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# LANGUAGE TypeFamilies #-}
-{-# LANGUAGE TypeOperators #-}
--- We use UndecidableInstances for type families with recursive definitions
--- like "\\".  Those instances will terminate since each equation unwraps one
--- cons cell of a type-level list.
-{-# LANGUAGE UndecidableInstances #-}
-
-module TensorFlow.Types
-    ( TensorType(..)
-    , TensorData(..)
-    , Shape(..)
-    , protoShape
-    , Attribute(..)
-    -- * Type constraints
-    , OneOf
-    , type (/=)
-    -- ** Implementation of constraints
-    , TypeError
-    , ExcludedCase
-    , TensorTypes
-    , NoneOf
-    , type (\\)
-    , Delete
-    , AllTensorTypes
-    ) where
-
-import Data.Complex (Complex)
-import Data.Default (def)
-import Data.Int (Int8, Int16, Int32, Int64)
-import Data.Monoid ((<>))
-import Data.Word (Word8, Word16, Word64)
-import Foreign.Storable (Storable)
-import GHC.Exts (Constraint, IsList(..))
-import Lens.Family2 (Lens', view, (&), (.~))
-import Lens.Family2.Unchecked (iso)
-import qualified Data.Attoparsec.ByteString as Atto
-import Data.ByteString (ByteString)
-import qualified Data.ByteString as B
-import Data.ByteString.Builder (Builder)
-import qualified Data.ByteString.Builder as Builder
-import qualified Data.ByteString.Lazy as L
-import qualified Data.Vector as V
-import qualified Data.Vector.Storable as S
-import Proto.Tensorflow.Core.Framework.AttrValue
-    ( AttrValue(..)
-    , AttrValue'ListValue(..)
-    , b
-    , f
-    , i
-    , s
-    , list
-    , type'
-    , shape
-    , tensor
-    )
-import Proto.Tensorflow.Core.Framework.Tensor as Tensor
-    ( TensorProto(..)
-    , floatVal
-    , doubleVal
-    , intVal
-    , stringVal
-    , int64Val
-    , boolVal
-    )
-import Proto.Tensorflow.Core.Framework.TensorShape
-    ( TensorShapeProto(..)
-    , dim
-    , size
-    )
-import Proto.Tensorflow.Core.Framework.Types (DataType(..))
-
-import TensorFlow.Internal.VarInt (getVarInt, putVarInt)
-import qualified TensorFlow.Internal.FFI as FFI
-
--- | Data about a tensor that is encoded for the TensorFlow APIs.
-newtype TensorData a = TensorData { unTensorData :: FFI.TensorData }
-
--- | The class of scalar types supported by tensorflow.
-class TensorType a where
-    tensorType :: a -> DataType
-    tensorRefType :: a -> DataType
-    tensorVal :: Lens' TensorProto [a]
-    -- | Decode the bytes of a TensorData into a Vector.
-    decodeTensorData :: TensorData a -> V.Vector a
-    -- | Encode a Vector into a TensorData.
-    --
-    -- The values should be in row major order, e.g.,
-    --
-    --   element 0:   index (0, ..., 0)
-    --   element 1:   index (0, ..., 1)
-    --   ...
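-    --
-    -- For example, a 2x3 matrix of floats is encoded row by row, as in
-    -- this sketch:
-    --
-    -- > encodeTensorData (Shape [2, 3]) (V.fromList [1, 2, 3, 4, 5, 6 :: Float])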
-    encodeTensorData :: Shape -> V.Vector a -> TensorData a
-
--- All types, besides ByteString, are encoded as simple arrays and we can use
--- Vector.Storable to encode/decode by type casting pointers.
-
--- TODO(fmayle): Assert that the data type matches the return type.
-simpleDecode :: Storable a => TensorData a -> V.Vector a
-simpleDecode = S.convert . S.unsafeCast . FFI.tensorDataBytes . unTensorData
-
-simpleEncode :: forall a . (TensorType a, Storable a)
-             => Shape -> V.Vector a -> TensorData a
-simpleEncode (Shape xs)
-    = TensorData . FFI.TensorData xs dt . S.unsafeCast . S.convert
-  where
-    dt = tensorType (undefined :: a)
-
-instance TensorType Float where
-    tensorType _ = DT_FLOAT
-    tensorRefType _ = DT_FLOAT_REF
-    tensorVal = floatVal
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType Double where
-    tensorType _ = DT_DOUBLE
-    tensorRefType _ = DT_DOUBLE_REF
-    tensorVal = doubleVal
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType Int32 where
-    tensorType _ = DT_INT32
-    tensorRefType _ = DT_INT32_REF
-    tensorVal = intVal
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType Int64 where
-    tensorType _ = DT_INT64
-    tensorRefType _ = DT_INT64_REF
-    tensorVal = int64Val
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-integral :: Integral a => Lens' [Int32] [a]
-integral = iso (fmap fromIntegral) (fmap fromIntegral)
-
-instance TensorType Word8 where
-    tensorType _ = DT_UINT8
-    tensorRefType _ = DT_UINT8_REF
-    tensorVal = intVal . integral
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType Word16 where
-    tensorType _ = DT_UINT16
-    tensorRefType _ = DT_UINT16_REF
-    tensorVal = intVal . integral
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType Int16 where
-    tensorType _ = DT_INT16
-    tensorRefType _ = DT_INT16_REF
-    tensorVal = intVal . integral
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType Int8 where
-    tensorType _ = DT_INT8
-    tensorRefType _ = DT_INT8_REF
-    tensorVal = intVal . integral
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType ByteString where
-    tensorType _ = DT_STRING
-    tensorRefType _ = DT_STRING_REF
-    tensorVal = stringVal
-    -- Encoded data layout (described in third_party/tensorflow/c/c_api.h):
-    --   table offsets for each element :: [Word64]
-    --   at each element offset:
-    --     string length :: VarInt64
-    --     string data   :: [Word8]
-    -- TODO(fmayle): Benchmark these functions.
-    decodeTensorData tensorData =
-        either (\err -> error $ "Malformed TF_STRING tensor; " ++ err) id $
-            if expected /= count
-                then Left $ "decodeTensorData for ByteString count mismatch " ++
-                            show (expected, count)
-                else V.mapM decodeString (S.convert offsets)
-      where
-        expected = S.length offsets
-        count = fromIntegral $ product $ FFI.tensorDataDimensions
-                    $ unTensorData tensorData
-        bytes = FFI.tensorDataBytes $ unTensorData tensorData
-        offsets = S.take count $ S.unsafeCast bytes :: S.Vector Word64
-        dataBytes = B.pack $ S.toList $ S.drop (count * 8) bytes
-        decodeString :: Word64 -> Either String ByteString
-        decodeString offset =
-            let stringDataStart = B.drop (fromIntegral offset) dataBytes
-            in Atto.eitherResult $ Atto.parse stringParser stringDataStart
-        stringParser :: Atto.Parser ByteString
-        stringParser = getVarInt >>= Atto.take . fromIntegral
-    encodeTensorData (Shape xs) vec =
-        TensorData $ FFI.TensorData xs dt byteVector
-      where
-        dt = tensorType (undefined :: ByteString)
-        -- Add a string to an offset table and data blob.
-        addString :: (Builder, Builder, Word64)
-                  -> ByteString
-                  -> (Builder, Builder, Word64)
-        addString (table, strings, offset) str =
-            ( table <> Builder.word64LE offset
-            , strings <> lengthBytes <> Builder.byteString str
-            , offset + lengthBytesLen + strLen
-            )
-          where
-            strLen = fromIntegral $ B.length str
-            lengthBytes = putVarInt $ fromIntegral $ B.length str
-            lengthBytesLen =
-                fromIntegral $ L.length $ Builder.toLazyByteString lengthBytes
-        -- Encode all strings.
-        (table', strings', _) = V.foldl' addString (mempty, mempty, 0) vec
-        -- Concat offset table with data.
-        bytes = table' <> strings'
-        -- Convert to Vector Word8.
-        byteVector = S.fromList $ L.unpack $ Builder.toLazyByteString bytes
-
-
-instance TensorType Bool where
-    tensorType _ = DT_BOOL
-    tensorRefType _ = DT_BOOL_REF
-    tensorVal = boolVal
-    decodeTensorData = simpleDecode
-    encodeTensorData = simpleEncode
-
-instance TensorType (Complex Float) where
-    tensorType _ = DT_COMPLEX64
-    tensorRefType _ = DT_COMPLEX64_REF
-    tensorVal = error "TODO (Complex Float)"
-    decodeTensorData = error "TODO (Complex Float)"
-    encodeTensorData = error "TODO (Complex Float)"
-
-instance TensorType (Complex Double) where
-    tensorType _ = DT_COMPLEX128
-    tensorRefType _ = DT_COMPLEX128_REF
-    tensorVal = error "TODO (Complex Double)"
-    decodeTensorData = error "TODO (Complex Double)"
-    encodeTensorData = error "TODO (Complex Double)"
-
--- | Shape (dimensions) of a tensor.
-newtype Shape = Shape [Int64] deriving Show
-
-instance IsList Shape where
-    type Item Shape = Int64
-    fromList = Shape . fromList
-    toList (Shape ss) = toList ss
-
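--- With the OverloadedLists extension, the instance above lets a shape be
--- written as a plain list literal, for example:
---
--- > [2, 3] :: Shape  -- the same as Shape [2, 3]
-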
-protoShape :: Lens' TensorShapeProto Shape
-protoShape = iso protoToShape shapeToProto
-  where
-    protoToShape = Shape . fmap (view size) . view dim
-    shapeToProto (Shape ds) = def & dim .~ fmap (\d -> def & size .~ d) ds
-
-
-class Attribute a where
-    attrLens :: Lens' AttrValue a
-
-instance Attribute Float where
-    attrLens = f
-
-instance Attribute ByteString where
-    attrLens = s
-
-instance Attribute Int64 where
-    attrLens = i
-
-instance Attribute DataType where
-    attrLens = type'
-
-instance Attribute TensorProto where
-    attrLens = tensor
-
-instance Attribute Bool where
-    attrLens = b
-
-instance Attribute Shape where
-    attrLens = shape . protoShape
-
--- TODO(gnezdo): support generating list(Foo) from [Foo].
-instance Attribute AttrValue'ListValue where
-    attrLens = list
-
-instance Attribute [DataType] where
-    attrLens = list . type'
-
-instance Attribute [Int64] where
-    attrLens = list . i
-
--- | A 'Constraint' specifying the possible choices of a 'TensorType'.
---
--- We implement a 'Constraint' like @OneOf '[Double, Float] a@ by turning the
--- natural representation as a conjunction, i.e.,
---
--- @
---    a == Double || a == Float
--- @
---
--- into a disjunction like
---
--- @
---     a \/= Int32 && a \/= Int64 && a \/= ByteString && ...
--- @
---
--- using an enumeration of all the possible 'TensorType's.
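---
--- For example, a function restricted to floating-point element types
--- could be given this sketch of a signature:
---
--- > toData :: OneOf '[Float, Double] a => Shape -> V.Vector a -> TensorData a
--- > toData = encodeTensorData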
-type OneOf ts a
-    = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)
-
--- | A 'Constraint' checking that the input is a list of 'TensorType's.
--- Helps improve error messages when using 'OneOf'.
-type family TensorTypes ts :: Constraint where
-    TensorTypes '[] = ()
-    TensorTypes (t ': ts) = (TensorType t, TensorTypes ts)
-
--- | A constraint checking that two types are different.
-type family a /= b :: Constraint where
-    a /= a = TypeError a ~ ExcludedCase
-    a /= b = ()
-
--- | Helper types to produce a reasonable type error message when the Constraint
--- "a /= a" fails.
--- TODO(judahjacobson): Use ghc-8's CustomTypeErrors for this.
-data TypeError a
-data ExcludedCase
-
--- | An enumeration of all valid 'TensorType's.
-type AllTensorTypes =
-    -- NOTE: This list should be kept in sync with
-    -- TensorFlow.OpGen.dtTypeToHaskell.
-    -- TODO: Add support for Complex Float/Double.
-    '[ Float
-     , Double
-     , Int8
-     , Int16
-     , Int32
-     , Int64
-     , Word8
-     , Word16
-     , ByteString
-     , Bool
-     ]
-
--- | Removes a type from the given list of types.
-type family Delete a as where
-    Delete a '[] = '[]
-    Delete a (a ': as) = Delete a as
-    Delete a (b ': as) = b ': Delete a as
-
--- | Takes the difference of two lists of types.
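---
--- For example, the following reduces to @'[Float, Int32]@:
---
--- > '[Float, Double, Int32] \\ '[Double]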
-type family as \\ bs where
-    as \\ '[] = as
-    as \\ (b ': bs) = Delete b as \\ bs
-
--- | A constraint that the type @a@ doesn't appear in the type list @ts@.
--- Assumes that @a@ and each of the elements of @ts@ are 'TensorType's.
-type family NoneOf ts a :: Constraint where
-    NoneOf '[] a = ()
-    NoneOf (t ': ts) a = (a /= t, NoneOf ts a)
-
-
diff --git a/docs/haddock/tensorflow-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-0.1.0.0/src/hscolour.css
deleted file mode 100644
index c15919e..0000000
--- a/docs/haddock/tensorflow-0.1.0.0/src/hscolour.css
+++ /dev/null
@@ -1,5 +0,0 @@
-.hs-keyglyph, .hs-layout {color: red;}
-.hs-keyword {color: blue;}
-.hs-comment, .hs-comment a {color: green;}
-.hs-str, .hs-chr {color: teal;}
-.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
diff --git a/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt b/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt
index 1b28057..5986428 100644
--- a/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt
+++ b/docs/haddock/tensorflow-0.1.0.0/tensorflow.txt
@@ -59,20 +59,29 @@ tensorType :: TensorType a => a -> DataType
tensorRefType :: TensorType a => a -> DataType
tensorVal :: TensorType a => Lens' TensorProto [a]

--- | Decode the bytes of a TensorData into a Vector.
-decodeTensorData :: TensorType a => TensorData a -> Vector a
+-- | Tensor data with the correct memory layout for tensorflow.
+newtype TensorData a
+TensorData :: TensorData -> TensorData a
+[unTensorData] :: TensorData a -> TensorData

--- | Encode a Vector into a TensorData.
+-- | Types that can be converted to and from TensorData.
+--
+-- Vector is the most efficient to encode/decode for most element
+-- types.
+class TensorType a => TensorDataType s a
+
+-- | Decode the bytes of a TensorData into an s.
+decodeTensorData :: TensorDataType s a => TensorData a -> s a
+
+-- | Encode an s into a TensorData.
--
-- The values should be in row major order, e.g.,
--
-- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ...
-encodeTensorData :: TensorType a => Shape -> Vector a -> TensorData a
-
--- | Data about a tensor that is encoded for the TensorFlow APIs.
-newtype TensorData a
-TensorData :: TensorData -> TensorData a
-[unTensorData] :: TensorData a -> TensorData
+encodeTensorData :: TensorDataType s a => Shape -> s a -> TensorData a
+newtype Scalar a
+Scalar :: a -> Scalar a
+[unScalar] :: Scalar a -> a

-- | Shape (dimensions) of a tensor.
newtype Shape
@@ -80,6 +89,64 @@ Shape :: [Int64] -> Shape
protoShape :: Lens' TensorShapeProto Shape
class Attribute a
attrLens :: Attribute a => Lens' AttrValue a
+data DataType :: *
+DT_INVALID :: DataType
+DT_FLOAT :: DataType
+DT_DOUBLE :: DataType
+DT_INT32 :: DataType
+DT_UINT8 :: DataType
+DT_INT16 :: DataType
+DT_INT8 :: DataType
+DT_STRING :: DataType
+DT_COMPLEX64 :: DataType
+DT_INT64 :: DataType
+DT_BOOL :: DataType
+DT_QINT8 :: DataType
+DT_QUINT8 :: DataType
+DT_QINT32 :: DataType
+DT_BFLOAT16 :: DataType
+DT_QINT16 :: DataType
+DT_QUINT16 :: DataType
+DT_UINT16 :: DataType
+DT_COMPLEX128 :: DataType
+DT_HALF :: DataType
+DT_RESOURCE :: DataType
+DT_FLOAT_REF :: DataType
+DT_DOUBLE_REF :: DataType
+DT_INT32_REF :: DataType
+DT_UINT8_REF :: DataType
+DT_INT16_REF :: DataType
+DT_INT8_REF :: DataType
+DT_STRING_REF :: DataType
+DT_COMPLEX64_REF :: DataType
+DT_INT64_REF :: DataType
+DT_BOOL_REF :: DataType
+DT_QINT8_REF :: DataType
+DT_QUINT8_REF :: DataType
+DT_QINT32_REF :: DataType
+DT_BFLOAT16_REF :: DataType
+DT_QINT16_REF :: DataType
+DT_QUINT16_REF :: DataType
+DT_UINT16_REF :: DataType
+DT_COMPLEX128_REF :: DataType
+DT_HALF_REF :: DataType
+DT_RESOURCE_REF :: DataType
+
+-- | A heterogeneous list type.
+data ListOf f as
+Nil :: ListOf f '[]
+(:/) :: f a -> ListOf f as -> ListOf f (a : as)
+type List = ListOf Identity
+
+-- | Equivalent of :/ for lists.
+(/:/) :: a -> List as -> List (a : as)
+data TensorTypeProxy a
+TensorTypeProxy :: TensorTypeProxy a
+class TensorTypes (ts :: [*])
+tensorTypes :: TensorTypes ts => TensorTypeList ts
+type TensorTypeList = ListOf TensorTypeProxy
+fromTensorTypeList :: TensorTypeList ts -> [DataType]
+fromTensorTypes :: TensorTypes as => Proxy as -> [DataType]

-- | A Constraint specifying the possible choices of a
-- TensorType.
@@ -101,6 +168,7 @@ attrLens :: Attribute a => Lens' AttrValue a
type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a)

-- | A constraint checking that two types are different.
+type OneOfs ts as = (TensorTypes as, TensorTypes ts, NoneOfs (AllTensorTypes \\ ts) as)

-- | Helper types to produce a reasonable type error message when the
-- Constraint "a /= a" fails. TODO(judahjacobson): Use ghc-8's
@@ -108,10 +176,6 @@ type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a
data TypeError a
data ExcludedCase

--- | A Constraint checking that the input is a list of
--- TensorTypes. Helps improve error messages when using
--- OneOf.
-
-- | A constraint that the type a doesn't appear in the type list
-- ts. Assumes that a and each of the elements of
-- ts are TensorTypes.
@@ -123,6 +187,16 @@ data ExcludedCase

-- | An enumeration of all valid TensorTypes.
type AllTensorTypes = '[Float, Double, Int8, Int16, Int32, Int64, Word8, Word16, ByteString, Bool]
instance GHC.Show.Show TensorFlow.Types.Shape
+instance Data.String.IsString a => Data.String.IsString (TensorFlow.Types.Scalar a)
+instance GHC.Real.RealFrac a => GHC.Real.RealFrac (TensorFlow.Types.Scalar a)
+instance GHC.Float.RealFloat a => GHC.Float.RealFloat (TensorFlow.Types.Scalar a)
+instance GHC.Real.Real a => GHC.Real.Real (TensorFlow.Types.Scalar a)
+instance GHC.Float.Floating a => GHC.Float.Floating (TensorFlow.Types.Scalar a)
+instance GHC.Real.Fractional a => GHC.Real.Fractional (TensorFlow.Types.Scalar a)
+instance GHC.Num.Num a => GHC.Num.Num (TensorFlow.Types.Scalar a)
+instance GHC.Classes.Ord a => GHC.Classes.Ord (TensorFlow.Types.Scalar a)
+instance GHC.Classes.Eq a => GHC.Classes.Eq (TensorFlow.Types.Scalar a)
+instance GHC.Show.Show a => GHC.Show.Show (TensorFlow.Types.Scalar a)
instance TensorFlow.Types.TensorType GHC.Types.Float
instance TensorFlow.Types.TensorType GHC.Types.Double
instance TensorFlow.Types.TensorType GHC.Int.Int32
@@ -135,6 +209,20 @@ instance TensorFlow.Types.TensorType Data.ByteString.Internal.ByteString
instance TensorFlow.Types.TensorType GHC.Types.Bool
instance TensorFlow.Types.TensorType (Data.Complex.Complex GHC.Types.Float)
instance TensorFlow.Types.TensorType (Data.Complex.Complex GHC.Types.Double)
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Types.Float
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Types.Double
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int8
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int16
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int32
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Int.Int64
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Word.Word8
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Word.Word16
+instance TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector GHC.Types.Bool
+instance (Foreign.Storable.Storable a, TensorFlow.Types.TensorDataType Data.Vector.Storable.Vector a) => TensorFlow.Types.TensorDataType Data.Vector.Vector a
+instance TensorFlow.Types.TensorDataType Data.Vector.Vector (Data.Complex.Complex GHC.Types.Float)
+instance TensorFlow.Types.TensorDataType Data.Vector.Vector (Data.Complex.Complex GHC.Types.Double)
+instance TensorFlow.Types.TensorDataType Data.Vector.Vector Data.ByteString.Internal.ByteString
+instance TensorFlow.Types.TensorDataType Data.Vector.Vector a => TensorFlow.Types.TensorDataType TensorFlow.Types.Scalar a
instance GHC.Exts.IsList TensorFlow.Types.Shape
instance TensorFlow.Types.Attribute GHC.Types.Float
instance TensorFlow.Types.Attribute Data.ByteString.Internal.ByteString
@@ -146,14 +234,18 @@ instance TensorFlow.Types.Attribute TensorFlow.Types.Shape
instance TensorFlow.Types.Attribute Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue
instance TensorFlow.Types.Attribute [Proto.Tensorflow.Core.Framework.Types.DataType]
instance TensorFlow.Types.Attribute [GHC.Int.Int64]
+instance TensorFlow.Types.All GHC.Classes.Eq (TensorFlow.Types.Map f as) => GHC.Classes.Eq (TensorFlow.Types.ListOf f as)
+instance TensorFlow.Types.All GHC.Show.Show (TensorFlow.Types.Map f as) => GHC.Show.Show (TensorFlow.Types.ListOf f as)
+instance TensorFlow.Types.TensorTypes '[]
+instance (TensorFlow.Types.TensorType t, TensorFlow.Types.TensorTypes ts) => TensorFlow.Types.TensorTypes (t : ts)

module TensorFlow.Output

-- | A type of graph node which has no outputs. These nodes are valuable
-- for causing side effects when they are run.
newtype ControlNode
-ControlNode :: Op -> ControlNode
-[unControlNode] :: ControlNode -> Op
+ControlNode :: NodeName -> ControlNode
+[unControlNode] :: ControlNode -> NodeName

-- | A device that a node can be assigned to. There's a naming convention
-- where the device names are constructed from job and replica names.
@@ -168,22 +260,8 @@ newtype NodeName
NodeName :: Text -> NodeName
[unNodeName] :: NodeName -> Text

--- | The representation of a node in a TensorFlow graph.
-data Op
-
--- | Properties are fixed, including the device, name, and scope.
-Rendered :: !NodeDef -> Op
-
--- | Properties are not fixed, and may change depending on which context
--- this op is rendered in.
-Unrendered :: !OpDef -> Op
-
--- | Traverse on the Unrendered of an Op.
---
--- Same implementation as _Left.
-opUnrendered :: Traversal' Op OpDef
-
--- | Op definition. This corresponds somewhat to the NodeDef proto.
+-- | Op definition. This corresponds somewhat to the NodeDef
+-- proto.
data OpDef
OpDef :: !PendingNodeName -> !OpType -> !(Map Text AttrValue) -> [Output] -> [NodeName] -> OpDef
[_opName] :: OpDef -> !PendingNodeName
@@ -208,10 +286,10 @@ OutputIx :: Int -> OutputIx

-- | An output of a TensorFlow node.
data Output
-Output :: !OutputIx -> !Op -> Output
-output :: OutputIx -> Op -> Output
-outputIndex :: Lens' Output OutputIx
-outputOp :: Lens' Output Op
+Output :: !OutputIx -> !NodeName -> Output
+[outputIndex] :: Output -> !OutputIx
+[outputNodeName] :: Output -> !NodeName
+output :: OutputIx -> NodeName -> Output

-- | The name specified for an unrendered Op. If an Op has an ImplicitName,
-- it will be assigned based on the opType plus a unique identifier. Does
@@ -221,17 +299,14 @@ ExplicitName :: !Text -> PendingNodeName
ImplicitName :: PendingNodeName

-- | Opaque handle to a mutable resource in the graph. Typical such
--- resources are variables. The type parameter corresponds to the dtype
--- of the tensor held in the variable.
-newtype ResourceHandle a
-ResourceHandle :: Output -> ResourceHandle a
-instance GHC.Classes.Ord TensorFlow.Output.Op
-instance GHC.Classes.Eq TensorFlow.Output.Op
+-- resources are variables.
+newtype ResourceHandle
+ResourceHandle :: Output -> ResourceHandle
+instance GHC.Classes.Ord TensorFlow.Output.OpDef
+instance GHC.Classes.Eq TensorFlow.Output.OpDef
instance GHC.Show.Show TensorFlow.Output.Output
instance GHC.Classes.Ord TensorFlow.Output.Output
instance GHC.Classes.Eq TensorFlow.Output.Output
-instance GHC.Classes.Ord TensorFlow.Output.OpDef
-instance GHC.Classes.Eq TensorFlow.Output.OpDef
instance GHC.Show.Show TensorFlow.Output.NodeName
instance GHC.Classes.Ord TensorFlow.Output.NodeName
instance GHC.Classes.Eq TensorFlow.Output.NodeName
@@ -251,68 +326,16 @@ instance GHC.Classes.Ord TensorFlow.Output.OpType
instance GHC.Classes.Eq TensorFlow.Output.OpType
instance Data.String.IsString TensorFlow.Output.OpType
instance GHC.Show.Show TensorFlow.Output.Device
-instance GHC.Show.Show TensorFlow.Output.Op
+instance Data.String.IsString TensorFlow.Output.PendingNodeName
instance Data.String.IsString TensorFlow.Output.Output

-module TensorFlow.Tensor
-
--- | A named output of a TensorFlow operation.
---
--- The type parameter a is the type of the elements in the
--- Tensor. The parameter v is either Value or
--- Ref, depending on whether the graph is treating this op output
--- as an immutable Value or a stateful Ref (e.g., a
--- variable). Note that a Tensor Ref can be casted into a
--- Tensor Value via value.
-data Tensor v a
-Tensor :: (TensorKind v) -> Output -> Tensor v a
-data Value
-data Ref
-
--- | This class provides a runtime switch on whether a Tensor should
--- be treated as a Value or as a Ref.
-data TensorKind v
-ValueKind :: TensorKind Value
-RefKind :: TensorKind Ref
-tensorKind :: Lens' (Tensor v a) (TensorKind v)
-tensorOutput :: Lens' (Tensor v a) Output
-
--- | Lens for the attributes of a tensor.
---
--- Only valid if the tensor has not yet been rendered. If the tensor has
--- been rendered, the traversal will be over nothing (nothing can be read
--- or written).
-tensorAttr :: Attribute attr => Text -> Traversal' (Tensor v a) attr
-
--- | Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a Ref
--- into Value. This behaves like a no-op.
-value :: Tensor v a -> Tensor Value a
-
--- | A pair of a Tensor and some data that should be fed into that
--- Tensor when running the graph.
-data Feed
-Feed :: Output -> TensorData -> Feed
-
--- | Create a Feed for feeding the given data into a Tensor
--- when running the graph.
---
--- Note that if a Tensor is rendered, its identity may change; so
--- feeding the rendered Tensor may be different than feeding the
--- original Tensor.
-feed :: Tensor v a -> TensorData a -> Feed
-
--- | Create a Tensor for a given name. This can be used to reference
--- nodes in a GraphDef that was loaded via addGraphDef.
--- TODO(judahjacobson): add more safety checks here.
-tensorFromName :: TensorKind v -> Text -> Tensor v a
-
module TensorFlow.Build

-- | A type of graph node which has no outputs. These nodes are valuable
-- for causing side effects when they are run.
newtype ControlNode
-ControlNode :: Op -> ControlNode
-[unControlNode] :: ControlNode -> Op
+ControlNode :: NodeName -> ControlNode
+[unControlNode] :: ControlNode -> NodeName
data Unique
explicitName :: Text -> PendingNodeName
implicitName :: PendingNodeName
@@ -324,19 +347,6 @@ opAttr :: Attribute a => Text -> Lens' OpDef a
opInputs :: Lens' OpDef [Output]
opControlInputs :: Lens' OpDef [NodeName]
data GraphState
-
--- | Render a Tensor, fixing its name, scope, device and control
--- inputs from the Build context. Also renders any dependencies of
--- the Tensor that weren't already rendered.
---
--- This operation is idempotent; render >=> render ===
--- render. However, rendering a (previously un-rendered)
--- Tensor in two different contexts may result in two different
--- Tensors.
-render :: Tensor v a -> Build (Tensor v a)
-
--- | Render a Tensor and get its node's name.
-renderNodeName :: Tensor v a -> Build NodeName
renderedNodeDefs :: Lens' GraphState (Map NodeName NodeDef)

-- | An action for building nodes in a TensorFlow graph. Used to manage
@@ -346,8 +356,13 @@ data BuildT m a

-- | An action for building nodes in a TensorFlow graph.
type Build = BuildT Identity

+-- | Lift a Build action into a monad, including any explicit op
+-- renderings.
+class Monad m => MonadBuild m
+build :: MonadBuild m => Build a -> m a
+
-- | Registers the given node to be executed before the next run.
-addInitializer :: ControlNode -> Build ()
+addInitializer :: MonadBuild m => ControlNode -> m ()

-- | This is Control.Monad.Morph.hoist sans the dependency.
hoistBuildT :: (forall a. m a -> n a) -> BuildT m b -> BuildT n b
@@ -357,7 +372,7 @@ runBuildT :: BuildT m a -> m (a, GraphState)

-- | Produce a GraphDef proto representation of the nodes that are rendered
-- in the given Build action.
asGraphDef :: Build a -> GraphDef
-addGraphDef :: GraphDef -> Build ()
+addGraphDef :: MonadBuild m => GraphDef -> m ()

-- | Get all the initializers that have accumulated so far, and clear that
-- buffer.
@@ -365,57 +380,41 @@ flushInitializers :: Monad m => BuildT m [NodeName]

-- | Get all the NodeDefs that have accumulated so far, and clear that
-- buffer.
-flushNodeBuffer :: Monad m => BuildT m [NodeDef]
+flushNodeBuffer :: MonadBuild m => m [NodeDef]
+summaries :: Lens' GraphState [Output]

-- | Render the given op if it hasn't been rendered already, and return its
-- name.
-getOrAddOp :: Op -> Build NodeName
+getOrAddOp :: OpDef -> Build NodeName

-- | Add a new node for a given OpDef. This is used for making
-- "stateful" ops which are not safe to dedup (e.g, "variable" and
-- "assign").
-addNewOp :: OpDef -> Build NodeDef
+addNewOp :: OpDef -> Build NodeName

--- | Render an Output and return a string representation for the
--- TensorFlow foreign APIs.
-renderOutput :: Output -> Build Text
-
--- | Places all nodes rendered in the given Build action on the same
--- device as the given Tensor (see also withDevice). Make sure
--- that the action has side effects of rendering the desired tensors. A
--- pure return would not have the desired effect.
-colocateWith :: Tensor v b -> Build a -> Build a
+-- | Turn an Output into a string representation for the TensorFlow
+-- foreign APIs.
+encodeOutput :: Output -> Text
+lookupNode :: NodeName -> Build NodeDef

-- | Modify some part of the state, run an action, and restore the state
-- after that action is done.
-withStateLens :: MonadState s m => Lens' s a -> (a -> a) -> m b -> m b
+withStateLens :: MonadBuild m => Lens' GraphState a -> (a -> a) -> m b -> m b

-- | Set a device for all nodes rendered in the given Build action
-- (unless further overridden by another use of withDevice).
-withDevice :: Maybe Device -> Build a -> Build a
+withDevice :: MonadBuild m => Maybe Device -> m a -> m a

-- | Prepend a scope to all nodes rendered in the given Build
-- action.
-withNameScope :: Text -> Build a -> Build a
+withNameScope :: MonadBuild m => Text -> m a -> m a

-- | Add control inputs to all nodes rendered in the given Build
-- action.
-withNodeDependencies :: Set NodeName -> Build a -> Build a
-
--- | Records the given summary action in Build for retrieval with
--- collectAllSummaries. The summary op is required to produce a
--- Summary protocol buffer in string form. For safety, use the
--- pre-composed functions: Logging.scalarSummary and
--- Logging.histogramSummary.
-addSummary :: SummaryTensor -> Build ()
-
--- | Synonym for the tensors that return serialized Summary proto.
-type SummaryTensor = Tensor Value ByteString
-
--- | Retrieves the summary ops collected thus far. Typically this only
--- happens once, but if buildWithSummary is used repeatedly, the
--- values accumulate.
-collectAllSummaries :: Monad m => BuildT m [SummaryTensor]
+withNodeDependencies :: MonadBuild m => Set NodeName -> m a -> m a
+instance Control.Monad.Catch.MonadMask m => Control.Monad.Catch.MonadMask (TensorFlow.Build.BuildT m)
+instance Control.Monad.Catch.MonadCatch m => Control.Monad.Catch.MonadCatch (TensorFlow.Build.BuildT m)
+instance Control.Monad.Catch.MonadThrow m => Control.Monad.Catch.MonadThrow (TensorFlow.Build.BuildT m)
instance GHC.Base.Monad m => Control.Monad.State.Class.MonadState TensorFlow.Build.GraphState (TensorFlow.Build.BuildT m)
instance Control.Monad.Trans.Class.MonadTrans TensorFlow.Build.BuildT
instance Control.Monad.IO.Class.MonadIO m => Control.Monad.IO.Class.MonadIO (TensorFlow.Build.BuildT m)
@@ -431,50 +430,164 @@ instance GHC.Enum.Enum TensorFlow.Build.Unique
instance GHC.Classes.Ord TensorFlow.Build.Unique
instance GHC.Classes.Eq TensorFlow.Build.Unique
instance GHC.Show.Show TensorFlow.Build.Scope
+instance GHC.Base.Monad m => TensorFlow.Build.MonadBuild (TensorFlow.Build.BuildT m)
+
+module TensorFlow.Tensor
+
+-- | A named output of a TensorFlow operation.
+--
+-- The type parameter a is the type of the elements in the
+-- Tensor. The parameter v is either:
+--
+-- <ul>
+-- <li>Build: An unrendered, immutable value.</li>
+-- <li>Value: A rendered, immutable value.</li>
+-- <li>Ref: A rendered stateful handle (e.g., a variable).</li>
+-- </ul>
+--
+-- Note that expr, value, render and
+-- renderValue can help convert between the different types of
+-- Tensor.
+data Tensor v a
+Tensor :: v Output -> Tensor v a
+[tensorOutput] :: Tensor v a -> v Output
+newtype Value a
+Value :: a -> Value a
+[runValue] :: Value a -> a
+newtype Ref a
+Ref :: a -> Ref a
+[runRef] :: Ref a -> a
+
+-- | Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op.
+value :: Tensor Ref a -> Tensor Value a
+renderValue :: MonadBuild m => Tensor v a -> m (Tensor Value a)
+
+-- | A pair of a Tensor and some data that should be fed into that
+-- Tensor when running the graph.
+data Feed
+Feed :: Output -> TensorData -> Feed
+
+-- | A class ensuring that a given tensor is rendered, i.e., has a fixed
+-- name, device, etc.
+class TensorKind v => Rendered v
+rendered :: Rendered v => v a -> a
+renderedOutput :: Rendered v => Tensor v a -> Output
+tensorNodeName :: Rendered v => Tensor v a -> NodeName
+
+-- | Create a Feed for feeding the given data into a Tensor
+-- when running the graph.
+--
+-- Note that if a Tensor is rendered, its identity may change; so
+-- feeding the rendered Tensor may be different than feeding the
+-- original Tensor.
+feed :: Rendered v => Tensor v a -> TensorData a -> Feed
+
+-- | Create a Tensor for a given name. This can be used to reference
+-- nodes in a GraphDef that was loaded via addGraphDef.
+-- TODO(judahjacobson): add more safety checks here.
+tensorFromName :: TensorKind v => Text -> Tensor v a
+
+-- | Like tensorFromName, but type-restricted to Value.
+tensorValueFromName :: Text -> Tensor Value a
+
+-- | Like tensorFromName, but type-restricted to Ref.
+tensorRefFromName :: Text -> Tensor Ref a
+type TensorList v = ListOf (Tensor v)
+tensorListOutputs :: Rendered v => TensorList v as -> [Output]
+
+-- | Places all nodes rendered in the given Build action on the same
+-- device as the given Tensor (see also withDevice). Make sure
+-- that the action has side effects of rendering the desired tensors. A
+-- pure return would not have the desired effect.
+colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a
+
+-- | Render a Tensor, fixing its name, scope, device and control
+-- inputs from the MonadBuild context. Also renders any
+-- dependencies of the Tensor that weren't already rendered.
+--
+-- This operation is idempotent; calling render on the same input
+-- in the same context will produce the same result. However, rendering
+-- the same Tensor Build in two different contexts may result in
+-- two different Tensor Values.
+render :: MonadBuild m => Tensor Build a -> m (Tensor Value a)
+expr :: TensorKind v => Tensor v a -> Tensor Build a
+
+-- | Records the given summary action in Build for retrieval with Summary
+-- protocol buffer in string form. For safety, use the pre-composed
+-- functions: Logging.scalarSummary and Logging.histogramSummary.
+addSummary :: (MonadBuild m, TensorKind v) => Tensor v ByteString -> m ()
+
+-- | Retrieves the summary ops collected thus far. Typically this only
+-- happens once, but if buildWithSummary is used repeatedly, the
+-- values accumulate.
+collectAllSummaries :: MonadBuild m => m [SummaryTensor]
+
+-- | Synonym for the tensors that return serialized Summary proto.
+type SummaryTensor = Tensor Value ByteString
+
+-- | An internal class for kinds of Tensors.
+class Monad v => TensorKind v
+toBuild :: TensorKind v => v a -> Build a
+instance GHC.Base.Functor TensorFlow.Tensor.Ref
+instance GHC.Base.Functor TensorFlow.Tensor.Value
+instance GHC.Base.Applicative TensorFlow.Tensor.Value
+instance GHC.Base.Monad TensorFlow.Tensor.Value
+instance GHC.Base.Applicative TensorFlow.Tensor.Ref
+instance GHC.Base.Monad TensorFlow.Tensor.Ref
+instance TensorFlow.Tensor.Rendered TensorFlow.Tensor.Value
+instance TensorFlow.Tensor.Rendered TensorFlow.Tensor.Ref
+instance TensorFlow.Tensor.TensorKind TensorFlow.Tensor.Value
+instance TensorFlow.Tensor.TensorKind TensorFlow.Tensor.Ref
+instance TensorFlow.Tensor.TensorKind TensorFlow.Build.Build

module TensorFlow.BuildOp

-- | Class of types that can be used as op outputs.
-class OpResult a
+class BuildResult a
+buildResult :: BuildResult a => Result a
+buildOp :: BuildResult a => [Int64] -> OpDef -> Build a

--- | Class of types that can be used as op functions.
-class BuildOp f
-
--- | Starts an operation that returns a structured set of tensors
--- (singletons or tuples).
-buildOp :: BuildOp f => OpDef -> f
-
--- | Starts an operation that returns a list of tensors.
-buildListOp :: BuildOp f => [Int64] -> OpDef -> f
+-- | Class of types that can be used as op outputs.
+class PureResult a
+pureResult :: PureResult a => ReaderT (Build OpDef) (State ResultState) a
+pureOp :: PureResult a => [Int64] -> Build OpDef -> a

-- | Returns true if all the integers in each tuple are identical. Throws
-- an error with a descriptive message if not.
eqLengthGuard :: [(String, [(String, Int)])] -> Bool
+class BuildInputs a
+buildInputs :: BuildInputs a => a -> Build [Output]
+
+-- | Parameters to build an op (for example, the node name or optional
+-- attributes). TODO: be more type safe.
+type OpParams = OpDef -> OpDef
instance GHC.Show.Show TensorFlow.BuildOp.ResultState
-instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2) => TensorFlow.BuildOp.OpResult (a1, a2)
-instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3) => TensorFlow.BuildOp.OpResult (a1, a2, a3)
-instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3, TensorFlow.BuildOp.OpResult a4) => TensorFlow.BuildOp.OpResult (a1, a2, a3, a4)
-instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3, TensorFlow.BuildOp.OpResult a4, TensorFlow.BuildOp.OpResult a5) => TensorFlow.BuildOp.OpResult (a1, a2, a3, a4, a5)
-instance (TensorFlow.BuildOp.OpResult a1, TensorFlow.BuildOp.OpResult a2, TensorFlow.BuildOp.OpResult a3, TensorFlow.BuildOp.OpResult a4, TensorFlow.BuildOp.OpResult a5, TensorFlow.BuildOp.OpResult a6) => TensorFlow.BuildOp.OpResult (a1, a2, a3, a4, a5, a6)
-instance TensorFlow.BuildOp.OpResult (TensorFlow.Output.ResourceHandle a)
-instance TensorFlow.BuildOp.OpResult (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Value a)
-instance TensorFlow.BuildOp.OpResult (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Ref a)
-instance TensorFlow.BuildOp.OpResult TensorFlow.Output.ControlNode
-instance TensorFlow.BuildOp.OpResult a => TensorFlow.BuildOp.OpResult [a]
-instance TensorFlow.BuildOp.BuildOp TensorFlow.Output.ControlNode
-instance TensorFlow.BuildOp.BuildOp (TensorFlow.Output.ResourceHandle a)
-instance TensorFlow.BuildOp.BuildOp (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Value a)
-instance TensorFlow.BuildOp.BuildOp (TensorFlow.Tensor.Tensor TensorFlow.Tensor.Ref a)
-instance TensorFlow.BuildOp.BuildOp [TensorFlow.Tensor.Tensor TensorFlow.Tensor.Value a]
-instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2) => TensorFlow.BuildOp.BuildOp (t1, t2)
-instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3) => TensorFlow.BuildOp.BuildOp (t1, t2, t3)
-instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3, TensorFlow.BuildOp.OpResult t4) => TensorFlow.BuildOp.BuildOp (t1, t2, t3, t4)
-instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3, TensorFlow.BuildOp.OpResult t4, TensorFlow.BuildOp.OpResult t5) => TensorFlow.BuildOp.BuildOp (t1, t2, t3, t4, t5)
-instance (TensorFlow.BuildOp.OpResult t1, TensorFlow.BuildOp.OpResult t2, TensorFlow.BuildOp.OpResult t3, TensorFlow.BuildOp.OpResult t4, TensorFlow.BuildOp.OpResult t5, TensorFlow.BuildOp.OpResult t6) => TensorFlow.BuildOp.BuildOp (t1, t2, t3, t4, t5, t6)
-instance TensorFlow.BuildOp.OpResult a => TensorFlow.BuildOp.BuildOp (TensorFlow.Build.Build a)
-instance TensorFlow.BuildOp.BuildOp f => TensorFlow.BuildOp.BuildOp (TensorFlow.Output.ResourceHandle a -> f)
-instance TensorFlow.BuildOp.BuildOp f => TensorFlow.BuildOp.BuildOp (TensorFlow.Tensor.Tensor v a -> f)
-instance TensorFlow.BuildOp.BuildOp f => TensorFlow.BuildOp.BuildOp ([TensorFlow.Tensor.Tensor v a] -> f)
+instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2) => TensorFlow.BuildOp.BuildResult (a1, a2)
+instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3) => TensorFlow.BuildOp.BuildResult (a1, a2, a3)
+instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4)
+instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5)
+instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5, TensorFlow.BuildOp.BuildResult a6) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5, a6)
+instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5, TensorFlow.BuildOp.BuildResult a6, TensorFlow.BuildOp.BuildResult a7) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5, a6, a7)
+instance (TensorFlow.BuildOp.BuildResult a1, TensorFlow.BuildOp.BuildResult a2, TensorFlow.BuildOp.BuildResult a3, TensorFlow.BuildOp.BuildResult a4, TensorFlow.BuildOp.BuildResult a5, TensorFlow.BuildOp.BuildResult a6, TensorFlow.BuildOp.BuildResult a7, TensorFlow.BuildOp.BuildResult a8) => TensorFlow.BuildOp.BuildResult (a1, a2, a3, a4, a5, a6, a7, a8)
+instance TensorFlow.BuildOp.BuildResult TensorFlow.Output.ResourceHandle
+instance TensorFlow.Tensor.Rendered v => TensorFlow.BuildOp.BuildResult (TensorFlow.Tensor.Tensor v a)
+instance TensorFlow.BuildOp.BuildResult TensorFlow.Output.ControlNode
+instance (TensorFlow.Tensor.Rendered v, TensorFlow.Types.TensorTypes as) => TensorFlow.BuildOp.BuildResult (TensorFlow.Tensor.TensorList v as)
+instance TensorFlow.BuildOp.BuildResult a => TensorFlow.BuildOp.BuildResult [a]
+instance TensorFlow.BuildOp.PureResult (TensorFlow.Tensor.Tensor TensorFlow.Build.Build a)
+instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2) => TensorFlow.BuildOp.PureResult (a1, a2)
+instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3) => TensorFlow.BuildOp.PureResult (a1, a2, a3)
+instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4)
+instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5)
+instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5, TensorFlow.BuildOp.PureResult a6) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5, a6)
+instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5, TensorFlow.BuildOp.PureResult a6, TensorFlow.BuildOp.PureResult a7) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5, a6, a7)
+instance (TensorFlow.BuildOp.PureResult a1, TensorFlow.BuildOp.PureResult a2, TensorFlow.BuildOp.PureResult a3, TensorFlow.BuildOp.PureResult a4, TensorFlow.BuildOp.PureResult a5, TensorFlow.BuildOp.PureResult a6, TensorFlow.BuildOp.PureResult a7, TensorFlow.BuildOp.PureResult a8) => TensorFlow.BuildOp.PureResult (a1, a2, a3, a4, a5, a6, a7, a8)
+instance TensorFlow.BuildOp.PureResult a => TensorFlow.BuildOp.PureResult [a]
+instance TensorFlow.Types.TensorTypes as => TensorFlow.BuildOp.PureResult (TensorFlow.Tensor.TensorList TensorFlow.Build.Build as)
+instance TensorFlow.BuildOp.BuildInputs a => TensorFlow.BuildOp.BuildInputs [a]
+instance TensorFlow.BuildOp.BuildInputs (TensorFlow.Tensor.Tensor v a)
+instance TensorFlow.BuildOp.BuildInputs (TensorFlow.Types.ListOf (TensorFlow.Tensor.Tensor v) as)
+instance TensorFlow.BuildOp.BuildInputs TensorFlow.Output.ResourceHandle

module TensorFlow.Nodes

@@ -500,21 +613,7 @@ Fetch :: Set Text -> (Map Text TensorData -> a) -> Fetch a

-- | Function to create an a from the fetched data.
[fetchRestore] :: Fetch a -> Map Text TensorData -> a
nodesUnion :: (Monoid b, Traversable t, Applicative f) => t (f b) -> f b
-fetchTensorList :: TensorType a => Tensor v a -> Build (Fetch (Shape, [a]))
-fetchTensorVector :: TensorType a => Tensor v a -> Build (Fetch (Shape, Vector a))
-newtype Scalar a
-Scalar :: a -> Scalar a
-[unScalar] :: Scalar a -> a
-instance Data.String.IsString a => Data.String.IsString (TensorFlow.Nodes.Scalar a)
-instance GHC.Real.RealFrac a => GHC.Real.RealFrac (TensorFlow.Nodes.Scalar a)
-instance GHC.Float.RealFloat a => GHC.Float.RealFloat (TensorFlow.Nodes.Scalar a)
-instance GHC.Real.Real a => GHC.Real.Real (TensorFlow.Nodes.Scalar a)
-instance GHC.Float.Floating a => GHC.Float.Floating (TensorFlow.Nodes.Scalar a)
-instance GHC.Real.Fractional a => GHC.Real.Fractional (TensorFlow.Nodes.Scalar a)
-instance GHC.Num.Num a => GHC.Num.Num (TensorFlow.Nodes.Scalar a)
-instance GHC.Classes.Ord a => GHC.Classes.Ord (TensorFlow.Nodes.Scalar a)
-instance GHC.Classes.Eq a => GHC.Classes.Eq (TensorFlow.Nodes.Scalar a)
-instance GHC.Show.Show a => GHC.Show.Show (TensorFlow.Nodes.Scalar a)
+fetchTensorVector :: (TensorType a) => Tensor v a -> Build (Fetch (TensorData a))
instance GHC.Base.Functor TensorFlow.Nodes.Fetch
instance GHC.Base.Applicative TensorFlow.Nodes.Fetch
instance (TensorFlow.Nodes.Nodes t1, TensorFlow.Nodes.Nodes t2) => TensorFlow.Nodes.Nodes (t1, t2)
@@ -525,36 +624,28 @@ instance TensorFlow.Nodes.Nodes t => TensorFlow.Nodes.Nodes [t]
instance TensorFlow.Nodes.Fetchable t a => TensorFlow.Nodes.Fetchable [t] [a]
instance TensorFlow.Nodes.Nodes TensorFlow.Output.ControlNode
instance (a ~ ()) => TensorFlow.Nodes.Fetchable TensorFlow.Output.ControlNode a
+instance TensorFlow.Nodes.Nodes (TensorFlow.Types.ListOf f '[])
+instance (TensorFlow.Nodes.Nodes (f a), TensorFlow.Nodes.Nodes (TensorFlow.Types.ListOf f as)) => TensorFlow.Nodes.Nodes (TensorFlow.Types.ListOf f (a : as))
+instance (l ~ TensorFlow.Types.List '[]) => TensorFlow.Nodes.Fetchable (TensorFlow.Types.ListOf f '[]) l
+instance (TensorFlow.Nodes.Fetchable (f t) a, TensorFlow.Nodes.Fetchable (TensorFlow.Types.ListOf f ts) (TensorFlow.Types.List as), i ~ Data.Functor.Identity.Identity) => TensorFlow.Nodes.Fetchable (TensorFlow.Types.ListOf f (t : ts)) (TensorFlow.Types.ListOf i (a : as))
instance TensorFlow.Nodes.Nodes (TensorFlow.Tensor.Tensor v a)
-instance (TensorFlow.Types.TensorType a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (Data.Vector.Vector a')
-instance (TensorFlow.Types.TensorType a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (TensorFlow.Nodes.Scalar a')
+instance (TensorFlow.Types.TensorType a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (TensorFlow.Types.TensorData a')
+instance (TensorFlow.Types.TensorType a, TensorFlow.Types.TensorDataType s a, a ~ a') => TensorFlow.Nodes.Fetchable (TensorFlow.Tensor.Tensor v a) (s a')

module TensorFlow.ControlFlow

-- | Modify a Build action, such that all new ops rendered in it
-- will depend on the nodes in the first argument.
-withControlDependencies :: Nodes t => t -> Build a -> Build a
+withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a

-- | Create an op that groups multiple operations.
--
-- When this op finishes, all ops in the input n have finished.
-- This op has no output.
-group :: Nodes t => t -> Build ControlNode
-
--- | Returns a Tensor with the same shape and contents as the input.
-identity :: TensorType a => Tensor v a -> Tensor v a
+group :: (MonadBuild m, Nodes t) => t -> m ControlNode

-- | Does nothing. Only useful as a placeholder for control edges.
-noOp :: ControlNode
-
--- | Returns a Tensor with a given name and the same shape and
--- contents as the input.
---
--- TODO(judahjacobson): This breaks when used with uninitialize
--- Tensor Refs, since RefIdentity doesn't have
--- SetAllowsUninitializedInput(). Look into whether we can change that
--- op.
-named :: TensorType a => Text -> Tensor v a -> Tensor v a
+noOp :: MonadBuild m => m ControlNode

module TensorFlow.Session

data Session a
@@ -581,22 +672,10 @@ runSession :: Session a -> IO a
-- sessionConfig).
runSessionWithOptions :: Options -> Session a -> IO a

--- | Lift a Build action into a Session, including any
--- explicit op renderings.
-build :: Build a -> Session a
-
--- | Helper combinator for doing something with the result of a
--- Build action. Example usage:
---
--- <pre>
---   buildAnd run :: Fetchable t a => Build t -> Session a
---   
-buildAnd :: (a -> Session b) -> Build a -> Session b - --- | Lift a Build action into a Session, including any --- explicit op renderings. Returns the merged summary ops which can be --- used for logging, see build for a convenient wrapper. -buildWithSummary :: Build a -> Session (a, [SummaryTensor]) +-- | Lift a Build action into a monad, including any explicit op +-- renderings. +class Monad m => MonadBuild m +build :: MonadBuild m => Build a -> m a -- | Add all pending rendered nodes to the TensorFlow graph and runs any -- pending initializers. @@ -604,7 +683,7 @@ buildWithSummary :: Build a -> Session (a, [SummaryTensor]) -- Note that run, runWithFeeds, etc. will all call this function -- implicitly. extend :: Session () -addGraphDef :: GraphDef -> Build () +addGraphDef :: MonadBuild m => GraphDef -> m () -- | Run a subgraph t, rendering any dependent nodes that aren't -- already rendered, and fetch the corresponding values for a. @@ -630,11 +709,15 @@ runWithFeeds_ :: Nodes t => [Feed] -> t -> Session () -- until runSession exits or an exception occurs. Graph extension happens -- synchronously, but the resultant run proceeds as a separate thread. asyncProdNodes :: Nodes t => t -> Session () +instance Control.Monad.Catch.MonadMask TensorFlow.Session.Session +instance Control.Monad.Catch.MonadCatch TensorFlow.Session.Session +instance Control.Monad.Catch.MonadThrow TensorFlow.Session.Session instance Control.Monad.IO.Class.MonadIO TensorFlow.Session.Session instance GHC.Base.Monad TensorFlow.Session.Session instance GHC.Base.Applicative TensorFlow.Session.Session instance GHC.Base.Functor TensorFlow.Session.Session instance Data.Default.Class.Default TensorFlow.Session.Options +instance TensorFlow.Build.MonadBuild TensorFlow.Session.Session -- | The core functionality of TensorFlow. @@ -669,31 +752,16 @@ runSession :: Session a -> IO a -- sessionConfig). runSessionWithOptions :: Options -> Session a -> IO a --- | Lift a Build action into a Session, including any --- explicit op renderings. -build :: Build a -> Session a - --- | Helper combinator for doing something with the result of a --- Build action. Example usage: --- ---
---   buildAnd run :: Fetchable t a => Build t -> Session a
---   
-buildAnd :: (a -> Session b) -> Build a -> Session b - --- | Lift a Build action into a Session, including any --- explicit op renderings. Returns the merged summary ops which can be --- used for logging, see build for a convenient wrapper. -buildWithSummary :: Build a -> Session (a, [SummaryTensor]) +-- | Lift a Build action into a monad, including any explicit op +-- renderings. +class Monad m => MonadBuild m +build :: MonadBuild m => Build a -> m a -- | Types that tensor representations (e.g. Tensor, -- ControlNode) can be fetched into. -- -- Includes collections of tensors (e.g. tuples). class Nodes t => Fetchable t a -newtype Scalar a -Scalar :: a -> Scalar a -[unScalar] :: Scalar a -> a -- | Types that contain ops which can be run. class Nodes t @@ -717,7 +785,7 @@ data Feed -- Note that if a Tensor is rendered, its identity may change; so -- feeding the rendered Tensor may be different than feeding the -- original Tensor. -feed :: Tensor v a -> TensorData a -> Feed +feed :: Rendered v => Tensor v a -> TensorData a -> Feed -- | Run a subgraph t, rendering any dependent nodes that aren't -- already rendered, feed the given input values, and fetch the @@ -743,19 +811,21 @@ type Build = BuildT Identity data BuildT m a -- | Render a Tensor, fixing its name, scope, device and control --- inputs from the Build context. Also renders any dependencies of --- the Tensor that weren't already rendered. +-- inputs from the MonadBuild context. Also renders any +-- dependencies of the Tensor that weren't already rendered. -- --- This operation is idempotent; render >=> render === --- render. However, rendering a (previously un-rendered) --- Tensor in two different contexts may result in two different --- Tensors. -render :: Tensor v a -> Build (Tensor v a) +-- This operation is idempotent; calling render on the same input +-- in the same context will produce the same result. However, rendering +-- the same Tensor Build in two different contexts may result in +-- two different Tensor Values. +render :: MonadBuild m => Tensor Build a -> m (Tensor Value a) -- | Produce a GraphDef proto representation of the nodes that are rendered -- in the given Build action. asGraphDef :: Build a -> GraphDef -addGraphDef :: GraphDef -> Build () +addGraphDef :: MonadBuild m => GraphDef -> m () +opName :: Lens' OpDef PendingNodeName +opAttr :: Attribute a => Text -> Lens' OpDef a -- | A type of graph node which has no outputs. These nodes are valuable -- for causing side effects when they are run. @@ -764,52 +834,54 @@ data ControlNode -- | A named output of a TensorFlow operation. -- -- The type parameter a is the type of the elements in the --- Tensor. The parameter v is either Value or --- Ref, depending on whether the graph is treating this op output --- as an immutable Value or a stateful Ref (e.g., a --- variable). Note that a Tensor Ref can be casted into a --- Tensor Value via value. -data Tensor v a -data Value -data Ref - --- | This class provides a runtime switch on whether a Tensor should --- be treated as a Value or as a Ref. -data TensorKind v -ValueKind :: TensorKind Value -RefKind :: TensorKind Ref - --- | Lens for the attributes of a tensor. +-- Tensor. The parameter v is either: -- --- Only valid if the tensor has not yet been rendered. If the tensor has --- been rendered, the traversal will be over nothing (nothing can be read --- or written). -tensorAttr :: Attribute attr => Text -> Traversal' (Tensor v a) attr +--
+--
+--   • Build: An unrendered, immutable value.
+--   • Value: A rendered, immutable value.
+--   • Ref: A rendered stateful handle (e.g., a variable).
+-- +-- Note that expr, value, render and +-- renderValue can help convert between the different types of +-- Tensor. +data Tensor v a +data Value a +data Ref a --- | Cast a 'Tensor *' into a 'Tensor Value'. Common usage is to cast a Ref --- into Value. This behaves like a no-op. -value :: Tensor v a -> Tensor Value a +-- | Cast a 'Tensor Ref' into a 'Tensor Value'. This behaves like a no-op. +value :: Tensor Ref a -> Tensor Value a -- | Create a Tensor for a given name. This can be used to reference --- nodes in a GraphDef that was loaded via addGraphDef. +-- nodes in a GraphDef that was loaded via addGraphDef. -- TODO(judahjacobson): add more safety checks here. -tensorFromName :: TensorKind v -> Text -> Tensor v a - --- | Data about a tensor that is encoded for the TensorFlow APIs. -data TensorData a +tensorFromName :: TensorKind v => Text -> Tensor v a +expr :: TensorKind v => Tensor v a -> Tensor Build a -- | The class of scalar types supported by tensorflow. class TensorType a --- | Decode the bytes of a TensorData into a Vector. -decodeTensorData :: TensorType a => TensorData a -> Vector a +-- | Tensor data with the correct memory layout for tensorflow. +data TensorData a --- | Encode a Vector into a TensorData. +-- | Types that can be converted to and from TensorData. +-- +-- Vector is the most efficient to encode/decode for most element +-- types. +class TensorType a => TensorDataType s a + +-- | Decode the bytes of a TensorData into an s. +decodeTensorData :: TensorDataType s a => TensorData a -> s a + +-- | Encode an s into a TensorData. -- -- The values should be in row major order, e.g., -- -- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ... -encodeTensorData :: TensorType a => Shape -> Vector a -> TensorData a +encodeTensorData :: TensorDataType s a => Shape -> s a -> TensorData a +newtype Scalar a +Scalar :: a -> Scalar a +[unScalar] :: Scalar a -> a -- | Shape (dimensions) of a tensor. newtype Shape @@ -840,7 +912,7 @@ type OneOf ts a = (TensorType a, TensorTypes ts, NoneOf (AllTensorTypes \\ ts) a -- device as the given Tensor (see also withDevice). Make sure -- that the action has side effects of rendering the desired tensors. A -- pure return would not have the desired effect. -colocateWith :: Tensor v b -> Build a -> Build a +colocateWith :: (MonadBuild m, Rendered v) => Tensor v b -> m a -> m a -- | A device that a node can be assigned to. There's a naming convention -- where the device names are constructed from job and replica names. @@ -850,33 +922,21 @@ Device :: Text -> Device -- | Set a device for all nodes rendered in the given Build action -- (unless further overridden by another use of withDevice). -withDevice :: Maybe Device -> Build a -> Build a +withDevice :: MonadBuild m => Maybe Device -> m a -> m a -- | Prepend a scope to all nodes rendered in the given Build -- action. -withNameScope :: Text -> Build a -> Build a - --- | Returns a Tensor with a given name and the same shape and --- contents as the input. --- --- TODO(judahjacobson): This breaks when used with uninitialize --- Tensor Refs, since RefIdentity doesn't have --- SetAllowsUninitializedInput(). Look into whether we can change that --- op. -named :: TensorType a => Text -> Tensor v a -> Tensor v a +withNameScope :: MonadBuild m => Text -> m a -> m a -- | Modify a Build action, such that all new ops rendered in it -- will depend on the nodes in the first argument. 
-withControlDependencies :: Nodes t => t -> Build a -> Build a +withControlDependencies :: (MonadBuild m, Nodes t) => t -> m a -> m a -- | Create an op that groups multiple operations. -- -- When this op finishes, all ops in the input n have finished. -- This op has no output. -group :: Nodes t => t -> Build ControlNode - --- | Returns a Tensor with the same shape and contents as the input. -identity :: TensorType a => Tensor v a -> Tensor v a +group :: (MonadBuild m, Nodes t) => t -> m ControlNode -- | Does nothing. Only useful as a placeholder for control edges. -noOp :: ControlNode +noOp :: MonadBuild m => m ControlNode diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html index 0a54655..1eb3a3a 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/TensorFlow-GenOps-Core.html @@ -1,315 +1,215 @@ TensorFlow.GenOps.Core

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.GenOps.Core

Synopsis

Documentation

_HostRecv Source

Arguments

:: TensorType tensor_type 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> Build (Tensor Value tensor_type)

tensor: The tensor to receive.

Receives the named tensor from send_device on recv_device.

_HostRecv requires its input on host memory whereas _Recv requires its - input on device memory.

_HostSend Source

Arguments

:: TensorType t 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> Tensor v1 t

tensor: The tensor to send.

-> Build ControlNode 

Sends the named tensor from send_device to recv_device.

_HostSend requires its input on host memory whereas _Send requires its - input on device memory.

_Recv Source

Arguments

:: TensorType tensor_type 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> Build (Tensor Value tensor_type)

tensor: The tensor to receive.

Receives the named tensor from send_device on recv_device.

_Send Source

Arguments

:: TensorType t 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> Tensor v1 t

tensor: The tensor to send.

-> Build ControlNode 

Sends the named tensor from send_device to recv_device.

noOp :: ControlNode Source

Does nothing. Only useful as a placeholder for control edges.

_Retval Source

Arguments

:: TensorType t 
=> Int64

index: This return value is the index-th return value of the function.

-> Tensor v1 t

input: The return value.

-> Build ControlNode 

A graph node which represents a return value of a function.

_Arg Source

Arguments

:: TensorType t 
=> Int64

index: This argument is the index-th argument of the function.

-> Build (Tensor Value t)

output: The argument.

A graph node which represents an argument to a function.

quantizedBatchNormWithGlobalNormalization Source

Arguments

:: (TensorType tinput, OneOf `[Int16, Int32, Word16, Word8]` tinput, TensorType out_type, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Bool

scale_after_normalization: A bool indicating whether the resulted tensor - needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v1 tinput

t: A 4D input Tensor.

-> Tensor v2 Float

t_min: The value represented by the lowest quantized input.

-> Tensor v3 Float

t_max: The value represented by the highest quantized input.

-> Tensor v4 tinput

m: A 1D mean Tensor with size matching the last dimension of t. +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Safe HaskellNone
LanguageHaskell2010

TensorFlow.GenOps.Core

Synopsis

Documentation

abort :: forall m'. MonadBuild m' => m' ControlNode

Raise an exception to abort the process when called. If exit_without_error is true, the process will exit normally; otherwise it will exit with a SIGABORT signal.

Returns nothing but an exception.

abort' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode

abs

Arguments

:: OneOf `[Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes the absolute value of a tensor.

Given a tensor x, this operation returns a tensor containing the absolute + value of each element in x. For example, if x is an input element and y is + an output element, this operation computes \(y = |x|\).
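A minimal usage sketch (assuming constant from the companion tensorflow-ops package and a working libtensorflow install; neither ships with this module):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.GenOps.Core as TF (abs)

main :: IO ()
main = do
  -- Build a small constant, apply abs, and fetch the result as a Vector.
  result <- TF.runSession $ do
    let x = TF.constant (TF.Shape [3]) [-1, 2, -3 :: Float]
    TF.run (TF.abs x)
  print (result :: V.Vector Float)
```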

abs'

Arguments

:: OneOf `[Int32, Int64, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y
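Each op also has a primed variant taking an OpParams modifier (OpParams = OpDef -> OpDef) for overriding attributes or the pending node name. A sketch using the opName lens exported by TensorFlow.Core; it assumes lens-family's (.~) setter and the IsString instance for PendingNodeName:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Lens.Family2 ((.~))
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (abs')

-- Same as abs, but the resulting node is rendered with a fixed name.
absNamed :: TF.Tensor v Float -> TF.Tensor TF.Build Float
absNamed = TF.abs' (TF.opName .~ "my_abs")
```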

accumulatorApplyGradient

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int64

local_step: The local_step value at which the gradient was computed.

-> Tensor v'3 dtype

gradient: A tensor of the gradient to be accumulated.

-> m' ControlNode 

Applies a gradient to a given accumulator. Does not add if local_step is less

than the accumulator's global_step.

accumulatorApplyGradient'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int64

local_step: The local_step value at which the gradient was computed.

-> Tensor v'3 dtype

gradient: A tensor of the gradient to be accumulated.

-> m' ControlNode 

accumulatorNumAccumulated

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to an accumulator.

-> m' (Tensor Value Int32)

num_accumulated: The number of gradients aggregated in the given accumulator.

Returns the number of gradients aggregated in the given accumulators.

accumulatorNumAccumulated'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to an accumulator.

-> m' (Tensor Value Int32)

num_accumulated: The number of gradients aggregated in the given accumulator.

accumulatorSetGlobalStep

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int64

new_global_step: The new global_step value to set.

-> m' ControlNode 

Updates the accumulator with a new value for global_step. Logs a warning if the

accumulator's value is already higher than new_global_step.

accumulatorSetGlobalStep'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int64

new_global_step: The new global_step value to set.

-> m' ControlNode 

accumulatorTakeGradient

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int32

num_required: Number of gradients required before we return an aggregate.

-> m' (Tensor Value dtype)

average: The average of the accumulated gradients.

Extracts the average gradient in the given ConditionalAccumulator, provided

that sufficient (i.e., more than num_required) gradients have been accumulated. + The op blocks until sufficient gradients have been accumulated. + If the accumulator has already aggregated more than num_required gradients, it + returns the average of the accumulated gradients. + Also automatically increments the recorded global_step in the accumulator by 1, + and resets the aggregate to 0.

accumulatorTakeGradient'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int32

num_required: Number of gradients required before we return an aggregate.

-> m' (Tensor Value dtype)

average: The average of the accumulated gradients.

acos

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes acos of x element-wise.

add

Arguments

:: OneOf `[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns x + y element-wise.

  • *NOTE*: Add supports broadcasting. AddN does not. More about broadcasting + here

addManySparseToTensorsMap

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the minibatch SparseTensor. + `sparse_indices[:, 0]` must be ordered values in `[0, N)`.

-> Tensor v'2 t

sparse_values: 1-D. The values of the minibatch SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the minibatch SparseTensor. + The minibatch size `N == sparse_shape[0]`.

-> m' (Tensor Value Int64)

sparse_handles: 1-D. The handles of the SparseTensor now stored in the + SparseTensorsMap. Shape: `[N]`.

Add an N-minibatch SparseTensor to a SparseTensorsMap, returning N handles.

A SparseTensor of rank R is represented by three tensors: sparse_indices, + sparse_values, and sparse_shape, where

```sparse_indices.shape[1] == sparse_shape.shape[0] == R```

An N-minibatch of SparseTensor objects is represented as a SparseTensor + having a first sparse_indices column taking values between `[0, N)`, where + the minibatch size `N == sparse_shape[0]`.

The input SparseTensor must have rank R greater than 1, and the first + dimension is treated as the minibatch dimension. Elements of the SparseTensor + must be sorted in increasing order of this first dimension. The stored + SparseTensor objects pointed to by each row of the output sparse_handles + will have rank `R-1`.

The SparseTensor values can then be read out as part of a minibatch by passing + the given keys as vector elements to TakeManySparseFromTensorsMap. To ensure + the correct SparseTensorsMap is accessed, ensure that the same + container and shared_name are passed to that Op. If no shared_name + is provided here, instead use the *name* of the Operation created by calling + AddManySparseToTensorsMap as the shared_name passed to + TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

addManySparseToTensorsMap'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the minibatch SparseTensor. + `sparse_indices[:, 0]` must be ordered values in `[0, N)`.

-> Tensor v'2 t

sparse_values: 1-D. The values of the minibatch SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the minibatch SparseTensor. + The minibatch size `N == sparse_shape[0]`.

-> m' (Tensor Value Int64)

sparse_handles: 1-D. The handles of the SparseTensor now stored in the + SparseTensorsMap. Shape: `[N]`.

addN

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> [Tensor v'1 t]

inputs: Must all be the same size and shape.

-> Tensor Build t

sum

Add all input tensors element-wise.
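For instance, summing three same-shape constants (again leaning on constant from the tensorflow-ops package, which is an assumption of this sketch):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.GenOps.Core as TF (addN)

sumThree :: IO (V.Vector Float)
sumThree = TF.runSession $ do
  -- addN requires every input to have the same size and shape.
  let c :: [Float] -> TF.Tensor TF.Build Float
      c = TF.constant (TF.Shape [2])
  -- Expected result: [9, 12]
  TF.run (TF.addN [c [1, 2], c [3, 4], c [5, 6]])
```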

addN'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> [Tensor v'1 t]

inputs: Must all be the same size and shape.

-> Tensor Build t

sum

addSparseToTensorsMap

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the SparseTensor.

-> Tensor v'2 t

sparse_values: 1-D. The values of the SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the SparseTensor.

-> m' (Tensor Value Int64)

sparse_handle: 0-D. The handle of the SparseTensor now stored in the + SparseTensorsMap.

Add a SparseTensor to a SparseTensorsMap and return its handle.

A SparseTensor is represented by three tensors: sparse_indices, + sparse_values, and sparse_shape.

This operator takes the given SparseTensor and adds it to a container + object (a SparseTensorsMap). A unique key within this container is generated + in the form of an int64, and this is the value that is returned.

The SparseTensor can then be read out as part of a minibatch by passing + the key as a vector element to TakeManySparseFromTensorsMap. To ensure + the correct SparseTensorsMap is accessed, ensure that the same + container and shared_name are passed to that Op. If no shared_name + is provided here, instead use the *name* of the Operation created by calling + AddSparseToTensorsMap as the shared_name passed to + TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

addSparseToTensorsMap'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the SparseTensor.

-> Tensor v'2 t

sparse_values: 1-D. The values of the SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the SparseTensor.

-> m' (Tensor Value Int64)

sparse_handle: 0-D. The handle of the SparseTensor now stored in the + SparseTensorsMap.

adjustContrast

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t 
=> Tensor v'1 t

images

-> Tensor v'2 Float

contrast_factor

-> Tensor v'3 Float

min_value

-> Tensor v'4 Float

max_value

-> Tensor Build Float

output

Deprecated. Disallowed in GraphDef version >= 2.

adjustContrast'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images

-> Tensor v'2 Float

contrast_factor

-> Tensor v'3 Float

min_value

-> Tensor v'4 Float

max_value

-> Tensor Build Float

output

adjustContrastv2

Arguments

:: Tensor v'1 Float

images: Images to adjust. At least 3-D.

-> Tensor v'2 Float

contrast_factor: A float multiplier for adjusting contrast.

-> Tensor Build Float

output: The contrast-adjusted image or images.

Adjust the contrast of one or more images.

images is a tensor of at least 3 dimensions. The last 3 dimensions are + interpreted as `[height, width, channels]`. The other dimensions only + represent a collection of images, such as `[batch, height, width, channels].`

Contrast is adjusted independently for each channel of each image.

For each channel, the Op first computes the mean of the image pixels in the + channel and then adjusts each component of each pixel to + `(x - mean) * contrast_factor + mean`.
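The per-channel arithmetic, written out as a plain Haskell function (illustrative only, not how the op is implemented):

```haskell
-- Contrast adjustment for one channel, mirroring
-- (x - mean) * contrast_factor + mean.
adjustChannel :: Float -> [Float] -> [Float]
adjustChannel contrastFactor xs = map adjust xs
  where
    mean     = sum xs / fromIntegral (length xs)
    adjust x = (x - mean) * contrastFactor + mean
```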

adjustContrastv2'

Arguments

:: OpParams 
-> Tensor v'1 Float

images: Images to adjust. At least 3-D.

-> Tensor v'2 Float

contrast_factor: A float multiplier for adjusting contrast.

-> Tensor Build Float

output: The contrast-adjusted image or images.

adjustHue

Arguments

:: Tensor v'1 Float

images: Images to adjust. At least 3-D.

-> Tensor v'2 Float

delta: A float delta to add to the hue.

-> Tensor Build Float

output: The hue-adjusted image or images.

Adjust the hue of one or more images.

images is a tensor of at least 3 dimensions. The last dimension is + interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB + colors are first mapped into HSV. A delta is then applied to all the hue values, + which are then remapped back to RGB colorspace.

adjustHue'

Arguments

:: OpParams 
-> Tensor v'1 Float

images: Images to adjust. At least 3-D.

-> Tensor v'2 Float

delta: A float delta to add to the hue.

-> Tensor Build Float

output: The hue-adjusted image or images.

adjustSaturation

Arguments

:: Tensor v'1 Float

images: Images to adjust. At least 3-D.

-> Tensor v'2 Float

scale: A float scale to add to the saturation.

-> Tensor Build Float

output: The saturation-adjusted image or images.

Adjust the saturation of one or more images.

images is a tensor of at least 3 dimensions. The last dimension is + interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB + colors are first mapped into HSV. A scale is then applied to all the saturation + values, which are then remapped back to RGB colorspace.

adjustSaturation'

Arguments

:: OpParams 
-> Tensor v'1 Float

images: Images to adjust. At least 3-D.

-> Tensor v'2 Float

scale: A float scale to add to the saturation.

-> Tensor Build Float

output: The saturation-adjusted image or images.

all

Arguments

:: OneOf `[Int32, Int64]` tidx 
=> Tensor v'1 Bool

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build Bool

output: The reduced tensor.

Computes the "logical and" of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless + keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + reduction_indices. If keep_dims is true, the reduced dimensions are + retained with length 1.
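A sketch reducing a 2x2 boolean matrix along its first dimension (column-wise); it assumes constant from tensorflow-ops and the Vector Bool fetch instance:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.GenOps.Core as TF (all)

-- For each column, is every entry True?
columnsAllTrue :: IO (V.Vector Bool)
columnsAllTrue = TF.runSession $ do
  let input = TF.constant (TF.Shape [2, 2]) [True, False, True, True]
      axes  = TF.constant (TF.Shape [1]) [0 :: Int32]
  TF.run (TF.all input axes)
```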

all'

Arguments

:: OneOf `[Int32, Int64]` tidx 
=> OpParams 
-> Tensor v'1 Bool

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build Bool

output: The reduced tensor.

allCandidateSampler

Arguments

:: Int64

num_sampled: Number of candidates to produce per batch.

-> Int64

num_true: Number of true labels per context.

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled + candidates in a batch are unique. This requires some approximation to + estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is + the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing + the number of times each candidate is expected to occur in a batch + of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled + candidate representing the number of times the candidate is expected + to occur in a batch of sampled candidates. If unique=true, then this is a + probability.

Generates labels for candidate sampling with a learned unigram distribution.

See explanations of candidate sampling and the data formats at + go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the + possibility of efficient dense matrix multiplication. The disadvantage is that + the sampled candidates must be chosen independently of the context and of the + true labels.

allCandidateSampler'

Arguments

:: OpParams 
-> Int64

num_sampled: Number of candidates to produce per batch.

-> Int64

num_true: Number of true labels per context.

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled + candidates in a batch are unique. This requires some approximation to + estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is + the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing + the number of times each candidate is expected to occur in a batch + of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled + candidate representing the number of times the candidate is expected + to occur in a batch of sampled candidates. If unique=true, then this is a + probability.

any

Arguments

:: OneOf `[Int32, Int64]` tidx 
=> Tensor v'1 Bool

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build Bool

output: The reduced tensor.

Computes the "logical or" of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless + keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + reduction_indices. If keep_dims is true, the reduced dimensions are + retained with length 1.

any'

Arguments

:: OneOf `[Int32, Int64]` tidx 
=> OpParams 
-> Tensor v'1 Bool

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build Bool

output: The reduced tensor.

applyAdadelta

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the adadelta scheme.

accum = rho() * accum + (1 - rho()) * grad.square(); + update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + update_accum = rho() * update_accum + (1 - rho()) * update.square(); + var -= update;

applyAdadelta'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

applyAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the adagrad scheme.

accum += grad * grad + var -= lr * grad * (1 / sqrt(accum))
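The same update, spelled out as one pure step over scalar state (an illustration of the math only, not of the kernel):

```haskell
-- One adagrad update on scalar state; returns (var', accum').
adagradStep :: Float -> Float -> (Float, Float) -> (Float, Float)
adagradStep lr grad (var, accum) = (var', accum')
  where
    accum' = accum + grad * grad
    var'   = var - lr * grad * (1 / sqrt accum')
```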

applyAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

applyAdagradDA

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

gradient_accumulator: Should be from a Variable().

-> Tensor Ref t

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 Int64

global_step: Training step number. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the proximal adagrad scheme.

applyAdagradDA'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

gradient_accumulator: Should be from a Variable().

-> Tensor Ref t

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 Int64

global_step: Training step number. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

applyAdam

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

m: Should be from a Variable().

-> Tensor Ref t

v: Should be from a Variable().

-> Tensor v'4 t

beta1_power: Must be a scalar.

-> Tensor v'5 t

beta2_power: Must be a scalar.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

beta1: Momentum factor. Must be a scalar.

-> Tensor v'8 t

beta2: Momentum factor. Must be a scalar.

-> Tensor v'9 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'10 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the Adam algorithm.

lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) + m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t + v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t + variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
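As a scalar illustration of those four updates (again a sketch of the math, not of the implementation):

```haskell
-- One Adam update on scalar state (var, m, v);
-- t is the step count, g the gradient.
adamStep :: Float -> Float -> Float -> Float -> Int -> Float
         -> (Float, Float, Float) -> (Float, Float, Float)
adamStep lr beta1 beta2 epsilon t g (var, m, v) = (var', m', v')
  where
    lrT  = lr * sqrt (1 - beta2 ^ t) / (1 - beta1 ^ t)
    m'   = beta1 * m + (1 - beta1) * g
    v'   = beta2 * v + (1 - beta2) * g * g
    var' = var - lrT * m' / (sqrt v' + epsilon)
```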

applyAdam'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

m: Should be from a Variable().

-> Tensor Ref t

v: Should be from a Variable().

-> Tensor v'4 t

beta1_power: Must be a scalar.

-> Tensor v'5 t

beta2_power: Must be a scalar.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

beta1: Momentum factor. Must be a scalar.

-> Tensor v'8 t

beta2: Momentum factor. Must be a scalar.

-> Tensor v'9 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'10 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

applyCenteredRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

mg: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the centered RMSProp algorithm.

The centered RMSProp algorithm uses an estimate of the centered second moment + (i.e., the variance) for normalization, as opposed to regular RMSProp, which + uses the (uncentered) second moment. This often helps with training, but is + slightly more expensive in terms of computation and memory.

Note that in dense implementation of this algorithm, mg, ms, and mom will + update even if the grad is zero, but in this sparse implementation, mg, ms, + and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2 + mean_grad = decay * mean_grad + (1-decay) * gradient

Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

mg <- rho * mg_{t-1} + (1-rho) * grad + ms <- rho * ms_{t-1} + (1-rho) * grad * grad + mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + var <- var - mom

applyCenteredRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

mg: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

applyFtrl

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 t

lr_power: Scaling factor. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the Ftrl-proximal scheme.

accum_new = accum + grad * grad + linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + accum = accum_new

applyFtrl'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 t

lr_power: Scaling factor. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

applyGradientDescent

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

delta: The change.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' by subtracting alpha * delta from it.

applyGradientDescent'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

delta: The change.

-> m' (Tensor Ref t)

out: Same as "var".

applyMomentum

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

momentum: Momentum. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the momentum scheme. Set use_nesterov = True if you

want to use Nesterov momentum.

accum = accum * momentum + grad + var -= lr * accum

applyMomentum'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

momentum: Momentum. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

applyProximalAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

accum += grad * grad + prox_v = var - lr * grad * (1 / sqrt(accum)) + var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

applyProximalAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

applyProximalGradientDescent

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

delta: The change.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' as FOBOS algorithm with fixed learning rate.

prox_v = var - alpha * delta + var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

applyProximalGradientDescent'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

delta: The change.

-> m' (Tensor Ref t)

out: Same as "var".

applyRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the RMSProp algorithm.

Note that in dense implementation of this algorithm, ms and mom will + update even if the grad is zero, but in this sparse implementation, ms + and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2 + Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad + mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + var <- var - mom
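The same three updates as one pure scalar step (illustrative only):

```haskell
-- One RMSProp update on scalar state (var, ms, mom); g is the gradient.
rmsPropStep :: Float -> Float -> Float -> Float -> Float
            -> (Float, Float, Float) -> (Float, Float, Float)
rmsPropStep lr rho momentum epsilon g (var, ms, mom) = (var', ms', mom')
  where
    ms'  = rho * ms + (1 - rho) * g * g
    mom' = momentum * mom + lr * g / sqrt (ms' + epsilon)
    var' = var - mom'
```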

applyRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> m' (Tensor Ref t)

out: Same as "var".

argMax

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

input

-> Tensor v'2 tidx

dimension: int32, 0 <= dimension < rank(input). Describes which dimension + of the input Tensor to reduce across. For vectors, use dimension = 0.

-> Tensor Build Int64

output

Returns the index with the largest value across dimensions of a tensor.
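A row-wise sketch on a 2x3 matrix (dimension 1 picks the maximum within each row; constant from tensorflow-ops assumed as before):

```haskell
import Data.Int (Int32, Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.GenOps.Core as TF (argMax)

-- Index of the largest entry in each row; expected result: [1, 0].
rowArgMax :: IO (V.Vector Int64)
rowArgMax = TF.runSession $ do
  let m   = TF.constant (TF.Shape [2, 3]) [1, 9, 3, 7, 5, 2 :: Float]
      dim = TF.constant (TF.Shape []) [1 :: Int32]
  TF.run (TF.argMax m dim)
```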

argMax'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 tidx

dimension: int32, 0 <= dimension < rank(input). Describes which dimension + of the input Tensor to reduce across. For vectors, use dimension = 0.

-> Tensor Build Int64

output

argMin

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

input

-> Tensor v'2 tidx

dimension: int32, 0 <= dimension < rank(input). Describes which dimension + of the input Tensor to reduce across. For vectors, use dimension = 0.

-> Tensor Build Int64

output

Returns the index with the smallest value across dimensions of a tensor.

argMin'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 tidx

dimension: int32, 0 <= dimension < rank(input). Describes which dimension + of the input Tensor to reduce across. For vectors, use dimension = 0.

-> Tensor Build Int64

output

asString

Arguments

:: OneOf `[Complex Float, Bool, Int32, Int64, Int8, Double, Float]` t 
=> Tensor v'1 t

input

-> Tensor Build ByteString

output

Converts each entry in the given tensor to strings. Supports many numeric

types and boolean.

asString'

Arguments

:: OneOf `[Complex Float, Bool, Int32, Int64, Int8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build ByteString

output

asin

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes asin of x element-wise.

assert

Arguments

:: (MonadBuild m', TensorTypes t) 
=> Tensor v'1 Bool

condition: The condition to evaluate.

-> TensorList v'2 t

data: The tensors to print out when condition is false.

-> m' ControlNode 

Asserts that the given condition is true.

If condition evaluates to false, print the list of tensors in `data`. + summarize determines how many entries of the tensors to print.

assert'

Arguments

:: (MonadBuild m', TensorTypes t) 
=> OpParams 
-> Tensor v'1 Bool

condition: The condition to evaluate.

-> TensorList v'2 t

data: The tensors to print out when condition is false.

-> m' ControlNode 

assign

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref t

ref: Should be from a Variable node. May be uninitialized.

-> Tensor v'2 t

value: The value to be assigned to the variable.

-> m' (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been reset.

Update ref by assigning value to it.

This operation outputs "ref" after the assignment is done. + This makes it easier to chain operations that need to use the reset value.
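A sketch that creates a variable, overwrites it with assign, and fetches the returned ref. It assumes initializedVariable from tensorflow-ops, whose exact signature may differ across versions:

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant, initializedVariable)
import qualified TensorFlow.GenOps.Core as TF (assign)

reassign :: IO (V.Vector Float)
reassign = TF.runSession $ do
  v <- TF.initializedVariable (TF.constant (TF.Shape [2]) [0, 0 :: Float])
  -- assign returns "ref" after the update, convenient for chaining.
  w <- TF.assign v (TF.constant (TF.Shape [2]) [1, 2])
  TF.run w
```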

assign'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node. May be uninitialized.

-> Tensor v'2 t

value: The value to be assigned to the variable.

-> m' (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been reset.

assignAdd

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 t

value: The value to be added to the variable.

-> m' (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been updated.

Update ref by adding value to it.

This operation outputs "ref" after the update is done. + This makes it easier to chain operations that need to use the reset value.

assignAdd'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 t

value: The value to be added to the variable.

-> m' (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been updated.

assignAddVariableOp

Arguments

:: (MonadBuild m', TensorType dtype) 
=> ResourceHandle

resource: handle to the resource in which to store the variable.

-> Tensor v'2 dtype

value: the value by which the variable will be incremented.

-> m' ControlNode 

Adds a value to the current value of a variable.

Any ReadVariableOp which depends directly or indirectly on this assign is + guaranteed to see the incremented value or a subsequent newer one.

Outputs the incremented value, which can be used to totally order the + increments to this variable.

assignAddVariableOp'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> ResourceHandle

resource: handle to the resource in which to store the variable.

-> Tensor v'2 dtype

value: the value by which the variable will be incremented.

-> m' ControlNode 

assignSub

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 t

value: The value to be subtracted from the variable.

-> m' (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been updated.

Update ref by subtracting value from it.

This operation outputs "ref" after the update is done. + This makes it easier to chain operations that need to use the reset value.

assignSub'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 t

value: The value to be subtracted from the variable.

-> m' (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been updated.

assignVariableOp

Arguments

:: (MonadBuild m', TensorType dtype) 
=> ResourceHandle

resource: handle to the resource in which to store the variable.

-> Tensor v'2 dtype

value: the value to set the new tensor to use.

-> m' ControlNode 

Assigns a new value to a variable.

Any ReadVariableOp with a control dependency on this op is guaranteed to return this value or a subsequent newer value of the variable.

assignVariableOp'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> ResourceHandle

resource: handle to the resource in which to store the variable.

-> Tensor v'2 dtype

value: the value to set the new tensor to use.

-> m' ControlNode 

atan

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes atan of x element-wise.

audioSummary

Arguments

:: Float

sample_rate: The sample rate of the signal in hertz.

-> Tensor v'1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v'2 Float

tensor: 2-D of shape `[batch_size, frames]`.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with audio.

The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.

The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

  • If max_outputs is 1, the summary value tag is '*tag*/audio'.
  • If max_outputs is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

audioSummary'

Arguments

:: OpParams 
-> Float

sample_rate: The sample rate of the signal in hertz.

-> Tensor v'1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v'2 Float

tensor: 2-D of shape `[batch_size, frames]`.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

audioSummaryV2

Arguments

:: Tensor v'1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v'2 Float

tensor: 2-D of shape `[batch_size, frames]`.

-> Tensor v'3 Float

sample_rate: The sample rate of the signal in hertz.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with audio.

The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.

The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

  • If max_outputs is 1, the summary value tag is '*tag*/audio'.
  • If max_outputs is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

audioSummaryV2'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v'2 Float

tensor: 2-D of shape `[batch_size, frames]`.

-> Tensor v'3 Float

sample_rate: The sample rate of the signal in hertz.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

avgPool

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> Tensor Build t

output: The average pooled output tensor.

Performs average pooling on the input.

Each entry in output is the mean of the corresponding size ksize window in value.

avgPool'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> Tensor Build t

output: The average pooled output tensor.

avgPool3D

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

-> Tensor Build t

output: The average pooled output tensor.

Performs 3D average pooling on the input.

avgPool3D'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

-> Tensor Build t

output: The average pooled output tensor.

avgPool3DGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int32

orig_input_shape: The original input dimensions.

-> Tensor v'2 t

grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

-> Tensor Build t

output: The backprop for input.

Computes gradients of average pooling function.

avgPool3DGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int32

orig_input_shape: The original input dimensions.

-> Tensor v'2 t

grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

-> Tensor Build t

output: The backprop for input.

avgPoolGrad

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 Int32

orig_input_shape: 1-D. Shape of the original input to avg_pool.

-> Tensor v'2 t

grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of avg_pool.

-> Tensor Build t

output: 4-D. Gradients w.r.t. the input of avg_pool.

Computes gradients of the average pooling function.

avgPoolGrad'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int32

orig_input_shape: 1-D. Shape of the original input to avg_pool.

-> Tensor v'2 t

grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of avg_pool.

-> Tensor Build t

output: 4-D. Gradients w.r.t. the input of avg_pool.

barrier

Arguments

:: MonadBuild m' 
=> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the barrier.

Defines a barrier that persists across different graph executions.

A barrier represents a key-value map, where each key is a string, and each value is a tuple of tensors.

At runtime, the barrier contains complete and incomplete elements. A complete element has defined tensors for all components of its value tuple, and may be accessed using BarrierTakeMany. An incomplete element has some undefined components in its value tuple, and may be updated using BarrierInsertMany.

barrier'

Arguments

:: MonadBuild m' 
=> OpParams 
-> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the barrier.

barrierClose

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to a barrier.

-> m' ControlNode 

Closes the given barrier.

This operation signals that no more new elements will be inserted in the given barrier. Subsequent InsertMany that try to introduce a new key will fail. Subsequent InsertMany operations that just add missing components to already existing elements will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient completed elements remain in the barrier. Subsequent TakeMany operations that would block will fail immediately.

barrierClose'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a barrier.

-> m' ControlNode 

barrierIncompleteSize

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to a barrier.

-> m' (Tensor Value Int32)

size: The number of incomplete elements (i.e. those with some of their value components not set) in the barrier.

Computes the number of incomplete elements in the given barrier.

barrierIncompleteSize'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a barrier.

-> m' (Tensor Value Int32)

size: The number of incomplete elements (i.e. those with some of their value components not set) in the barrier.

barrierInsertMany

Arguments

:: (MonadBuild m', TensorType t) 
=> Int64

component_index: The component of the barrier elements that is being assigned.

-> Tensor Ref ByteString

handle: The handle to a barrier.

-> Tensor v'2 ByteString

keys: A one-dimensional tensor of keys, with length n.

-> Tensor v'3 t

values: An any-dimensional tensor of values, which are associated with the respective keys. The 0th dimension must have length n.

-> m' ControlNode 

For each key, assigns the respective value to the specified component.

If a key is not found in the barrier, this operation will create a new incomplete element. If a key is found in the barrier, and the element already has a value at component_index, this operation will fail with INVALID_ARGUMENT, and leave the barrier in an undefined state.

barrierInsertMany'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Int64

component_index: The component of the barrier elements that is being assigned.

-> Tensor Ref ByteString

handle: The handle to a barrier.

-> Tensor v'2 ByteString

keys: A one-dimensional tensor of keys, with length n.

-> Tensor v'3 t

values: An any-dimensional tensor of values, which are associated with the respective keys. The 0th dimension must have length n.

-> m' ControlNode 

barrierReadySize

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to a barrier.

-> m' (Tensor Value Int32)

size: The number of complete elements (i.e. those with all of their value components set) in the barrier.

Computes the number of complete elements in the given barrier.

barrierReadySize'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a barrier.

-> m' (Tensor Value Int32)

size: The number of complete elements (i.e. those with all of their value components set) in the barrier.

barrierTakeMany

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> Tensor Ref ByteString

handle: The handle to a barrier.

-> Tensor v'2 Int32

num_elements: A single-element tensor containing the number of elements to take.

-> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types)

(indices, keys, values)

  • indices: A one-dimensional tensor of indices, with length num_elems. These indices refer to the batch in which the values were placed into the barrier (starting with MIN_LONG and increasing with each BarrierInsertMany).
  • keys: A one-dimensional tensor of keys, with length num_elements.
  • values: One any-dimensional tensor per component in a barrier element. All values have length num_elements in the 0th dimension.

Takes the given number of completed elements from a barrier.

This operation concatenates completed-element component tensors along the 0th dimension to make a single component tensor.

Elements come out of the barrier when they are complete, and in the order in which they were placed into the barrier. The indices output provides information about the batch in which each element was originally inserted into the barrier.

barrierTakeMany'

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a barrier.

-> Tensor v'2 Int32

num_elements: A single-element tensor containing the number of elements to take.

-> m' (Tensor Value Int64, Tensor Value ByteString, TensorList Value component_types)

(indices, keys, values)

  • indices: A one-dimensional tensor of indices, with length num_elems. These indices refer to the batch in which the values were placed into the barrier (starting with MIN_LONG and increasing with each BarrierInsertMany).
  • keys: A one-dimensional tensor of keys, with length num_elements.
  • values: One any-dimensional tensor per component in a barrier element. All values have length num_elements in the 0th dimension.

batchCholesky

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input

-> Tensor Build t

output

batchCholesky'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

batchCholeskyGrad

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

l

-> Tensor v'2 t

grad

-> Tensor Build t

output

batchCholeskyGrad'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

l

-> Tensor v'2 t

grad

-> Tensor Build t

output

batchFFT

Arguments

:: Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchFFT'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchFFT2D

Arguments

:: Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchFFT2D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchFFT3D

Arguments

:: Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchFFT3D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchIFFT

Arguments

:: Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchIFFT'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchIFFT2D

Arguments

:: Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchIFFT2D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchIFFT3D

Arguments

:: Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchIFFT3D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input

-> Tensor Build (Complex Float)

output

batchMatMul

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t 
=> Tensor v'1 t

x: 3-D or higher with shape `[..., r_x, c_x]`.

-> Tensor v'2 t

y: 3-D or higher with shape `[..., r_y, c_y]`.

-> Tensor Build t

output: 3-D or higher with shape `[..., r_o, c_o]`

Multiplies slices of two tensors in batches.

Multiplies all slices of Tensor x and y (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the adj_x or adj_y flag to True, which are by default False.

The input tensors x and y are 3-D or higher with shape `[..., r_x, c_x]` and `[..., r_y, c_y]`.

The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:

r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y

It is computed as:

output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
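
As a quick check of the shape rule above, here is a small pure-Haskell sketch (illustrative only, not part of the generated bindings) that computes the trailing output dimensions from the adjoint flags:

```
-- Trailing (rows, cols) of x and y plus the adjoint flags determine
-- the trailing dims of the output, per the rule above.
batchMatMulShape :: (Int, Int) -> (Int, Int) -> Bool -> Bool -> (Int, Int)
batchMatMulShape (rX, cX) (rY, cY) adjX adjY = (rO, cO)
  where
    rO = if adjX then cX else rX
    cO = if adjY then cY else rY

-- batchMatMulShape (2, 3) (3, 4) False False == (2, 4)
```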

batchMatMul'

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x: 3-D or higher with shape `[..., r_x, c_x]`.

-> Tensor v'2 t

y: 3-D or higher with shape `[..., r_y, c_y]`.

-> Tensor Build t

output: 3-D or higher with shape `[..., r_o, c_o]`

batchMatrixBandPart

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor v'2 Int64

num_lower

-> Tensor v'3 Int64

num_upper

-> Tensor Build t

band

batchMatrixBandPart'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 Int64

num_lower

-> Tensor v'3 Int64

num_upper

-> Tensor Build t

band

batchMatrixDeterminant

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input

-> Tensor Build t

output

batchMatrixDeterminant'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

batchMatrixDiag

Arguments

:: TensorType t 
=> Tensor v'1 t

diagonal

-> Tensor Build t

output

batchMatrixDiag'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

diagonal

-> Tensor Build t

output

batchMatrixDiagPart

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor Build t

diagonal

batchMatrixDiagPart'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

diagonal

batchMatrixInverse

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input

-> Tensor Build t

output

batchMatrixInverse'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

batchMatrixSetDiag

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor v'2 t

diagonal

-> Tensor Build t

output

batchMatrixSetDiag'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 t

diagonal

-> Tensor Build t

output

batchMatrixSolve

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

matrix

-> Tensor v'2 t

rhs

-> Tensor Build t

output

batchMatrixSolve'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

matrix

-> Tensor v'2 t

rhs

-> Tensor Build t

output

batchMatrixSolveLs

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

matrix

-> Tensor v'2 t

rhs

-> Tensor v'3 Double

l2_regularizer

-> Tensor Build t

output

batchMatrixSolveLs'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

matrix

-> Tensor v'2 t

rhs

-> Tensor v'3 Double

l2_regularizer

-> Tensor Build t

output

batchMatrixTriangularSolve

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

matrix

-> Tensor v'2 t

rhs

-> Tensor Build t

output

batchMatrixTriangularSolve'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

matrix

-> Tensor v'2 t

rhs

-> Tensor Build t

output

batchNormWithGlobalNormalization

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Bool

scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v'1 t

t: A 4D input Tensor.

-> Tensor v'2 t

m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'3 t

v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'4 t

beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor.

-> Tensor v'5 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor.

-> Tensor Build t

result

Batch normalization.

This op is deprecated and will be removed in the future. Prefer `tf.nn.batch_normalization`.

quantizedRelu6 Source

Arguments

:: (TensorType tinput, OneOf `[Int16, Int32, Word16, Word8]` tinput, TensorType out_type, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v1 tinput

features

-> Tensor v2 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v3 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`

quantizedBiasAdd Source

Arguments

:: (TensorType t1, OneOf `[Int16, Int32, Word16, Word8]` t1, TensorType t2, OneOf `[Int16, Int32, Word16, Word8]` t2, TensorType out_type, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v1 t1

input

-> Tensor v2 t2

bias: A 1D bias Tensor with size matching the last dimension of input.

-> Tensor v3 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v4 Float

max_input: The float value that the highest quantized input value represents.

-> Tensor v5 Float

min_bias: The float value that the lowest quantized bias value represents.

-> Tensor v6 Float

max_bias: The float value that the highest quantized bias value represents.

-> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)

(output, min_out, max_out)

  • output
  • min_out: The float value that the lowest quantized output value represents.
  • max_out: The float value that the highest quantized output value represents.

Adds Tensor bias to Tensor input for Quantized types.

Broadcasts the values of bias on dimensions 0..N-2 of input.

fractionalAvgPoolGrad Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
=> Tensor v1 Int64

orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

-> Tensor v2 t

out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of fractional_avg_pool.

-> Tensor v3 Int64

row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence.

-> Tensor v4 Int64

col_pooling_sequence: column pooling sequence, form pooling region with row_pooling_sequence.

-> Tensor Value t

output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

Computes gradient of the FractionalAvgPool function.

Unlike FractionalMaxPoolGrad, we don't need to find arg_max for FractionalAvgPoolGrad, we just need to evenly back-propagate each element of out_backprop to those indices that form the same pooling cell. Therefore, we just need to know the shape of the original input tensor, instead of the whole tensor.

fractionalMaxPoolGrad Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
=> Tensor v1 t

orig_input: Original input for fractional_max_pool

-> Tensor v2 t

orig_output: Original output for fractional_max_pool

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of fractional_max_pool.

-> Tensor v4 Int64

row_pooling_sequence: row pooling sequence, form pooling region with col_pooling_sequence.

-> Tensor v5 Int64

col_pooling_sequence: column pooling sequence, form pooling region with row_pooling_sequence.

-> Tensor Value t

output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

Computes gradient of the FractionalMaxPool function.

fractionalMaxPool Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
=> Tensor v1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> (Tensor Value t, Tensor Value Int64, Tensor Value Int64)

(output, row_pooling_sequence, col_pooling_sequence)

  • output: output tensor after fractional max pooling.
  • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
  • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

Performs fractional max pooling on the input.

Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer.

The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries.

First we define the following:

  1. input_row_length : the number of rows from the input set
  2. output_row_length : which will be smaller than the input
  3. alpha = input_row_length / output_row_length : our reduction ratio
  4. K = floor(alpha)
  5. row_pooling_sequence : this is the result list of pool boundary rows

Then, row_pooling_sequence should satisfy:

  1. a[0] = 0 : the first value of the sequence is 0
  2. a[end] = input_row_length : the last value of the sequence is the size
  3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4. length(row_pooling_sequence) = output_row_length+1

For more details on fractional max pooling, see this paper: Benjamin Graham, Fractional Max-Pooling
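
The constraints above are easy to check mechanically. Below is a small pure-Haskell sketch (illustrative, not the TensorFlow implementation) that validates a candidate row_pooling_sequence against constraints 1-4:

```
-- Checks constraints (1)-(4) for a candidate pooling sequence a,
-- given input_row_length and output_row_length.
validPoolingSequence :: Int -> Int -> [Int] -> Bool
validPoolingSequence inputLen outputLen a =
       head a == 0                                -- (1) starts at 0
    && last a == inputLen                         -- (2) ends at input size
    && all okGap (zip a (tail a))                 -- (3) gaps are K or K+1
    && length a == outputLen + 1                  -- (4) right length
  where
    k = inputLen `div` outputLen                  -- K = floor(alpha)
    okGap (x, y) = let d = y - x in d == k || d == k + 1

-- validPoolingSequence 10 4 [0, 2, 5, 7, 10] == True  (alpha = 2.5, K = 2)
```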

topK Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Int64

k: Number of top elements to look for along the last dimension (along each row for matrices).

-> Tensor v1 t

input: 1-D or higher with last dimension at least k.

-> (Tensor Value t, Tensor Value Int32)

(values, indices)

  • values: The k largest elements along each last dimensional slice.
  • indices: The indices of values within the last dimension of input.

Finds values and indices of the k largest elements for the last dimension.

If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

For matrices (resp. higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,

values.shape = indices.shape = input.shape[:-1] + [k]

If two elements are equal, the lower-index element appears first.

If k varies dynamically, use TopKV2 below.
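
For the rank-1 case, the semantics above, including the tie rule that the lower-index element appears first, can be sketched in pure Haskell as follows (illustrative only, not the TF kernel):

```
import Data.List (sortBy)
import Data.Ord (Down (..), comparing)

-- Sort by value descending, breaking ties by lower index first,
-- then take the first k (values, indices).
topKList :: Ord a => Int -> [a] -> ([a], [Int])
topKList k xs = unzip (take k sorted)
  where
    sorted = sortBy (comparing (\(v, i) -> (Down v, i))) (zip xs [0 ..])

-- topKList 2 [1, 3, 3, 2 :: Int] == ([3, 3], [1, 2])
```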

inTopK Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` t) 
=> Int64

k: Number of top elements to look at for computing precision.

-> Tensor v1 Float

predictions: A batch_size x classes tensor.

-> Tensor v2 t

targets: A batch_size vector of class ids.

-> Tensor Value Bool

precision: Computed Precision at k as a `bool Tensor`.

Says whether the targets are in the top K predictions.

This outputs a batch_size bool array, an entry `out[i]` is true if the prediction for the target class is among the top k predictions among all predictions for example i. Note that the behavior of InTopK differs from the TopK op in its handling of ties; if multiple classes have the same prediction value and straddle the top-k boundary, all of those classes are considered to be in the top k.

More formally, let

\(predictions_i\) be the predictions for all classes for example i, \(targets_i\) be the target class for example i, \(out_i\) be the output for example i,

$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

sparseSoftmaxCrossEntropyWithLogits Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t, TensorType tlabels, OneOf `[Int32, Int64]` tlabels) 
=> Tensor v1 t

features: batch_size x num_classes matrix

-> Tensor v2 tlabels

labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry.

-> (Tensor Value t, Tensor Value t)

(loss, backprop)

  • loss: Per example loss (batch_size vector).
  • backprop: backpropagated gradients (batch_size x num_classes matrix).

Computes softmax cross entropy cost and gradients to backpropagate.

Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept a matrix of label probabilities, but rather a single label per row of features. This label is considered to have probability 1.0 for the given row.

Inputs are the logits, not probabilities.

softmaxCrossEntropyWithLogits Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

features: batch_size x num_classes matrix

-> Tensor v2 t

labels: batch_size x num_classes matrix. The caller must ensure that each batch of labels represents a valid probability distribution.

-> (Tensor Value t, Tensor Value t)

(loss, backprop)

  • loss: Per example loss (batch_size vector).
  • backprop: backpropagated gradients (batch_size x num_classes matrix).

Computes softmax cross entropy cost and gradients to backpropagate.

Inputs are the logits, not probabilities.

logSoftmax Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

logits: 2-D with shape `[batch_size, num_classes]`.

-> Tensor Value t

logsoftmax: Same shape as logits.

Computes log softmax activations.

For each batch i and class j we have

logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
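
A direct transcription of this formula can overflow in exp; subtracting the row maximum first gives the same result with better numerical behavior. A pure-Haskell sketch of one row (illustrative, not the TF kernel):

```
-- log softmax of one row, with the row maximum subtracted before
-- exponentiating so that exp never overflows.
logSoftmaxRow :: [Double] -> [Double]
logSoftmaxRow logits = [x - m - logSumExp | x <- logits]
  where
    m         = maximum logits
    logSumExp = log (sum [exp (x - m) | x <- logits])
```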

softsignGrad Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

gradients: The backpropagated gradients to the corresponding softsign operation.

-> Tensor v2 t

features: The features passed as input to the corresponding softsign operation.

-> Tensor Value t

backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

Computes softsign gradients for a softsign operation.

softplus Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

features

-> Tensor Value t

activations

Computes softplus: `log(exp(features) + 1)`.
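
Evaluating `log(exp(features) + 1)` naively overflows for large inputs; a numerically safer equivalent form, sketched in pure Haskell (illustrative only):

```
-- max(x, 0) + log(1 + exp(-|x|)) equals log(exp(x) + 1) for all x,
-- but never exponentiates a large positive argument.
softplusScalar :: Double -> Double
softplusScalar x = max x 0 + log (1 + exp (negate (abs x)))
```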

eluGrad Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

gradients: The backpropagated gradients to the corresponding Elu operation.

-> Tensor v2 t

outputs: The outputs of the corresponding Elu operation.

-> Tensor Value t

backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, gradients otherwise.

Computes gradients for the exponential linear (Elu) operation.

elu Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

features

-> Tensor Value t

activations

Computes exponential linear: `exp(features) - 1` if features < 0, features otherwise.

See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)

relu6 Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

features

-> Tensor Value t

activations

Computes rectified linear 6: `min(max(features, 0), 6)`.
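
The scalar rule is just a clamp; as a one-line pure-Haskell sketch (illustrative only):

```
-- Clamp to the interval [0, 6].
relu6Scalar :: (Num a, Ord a) => a -> a
relu6Scalar x = min (max x 0) 6
```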

reluGrad Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

gradients: The backpropagated gradients to the corresponding Relu operation.

-> Tensor v2 t

features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently).

-> Tensor Value t

backprops: `gradients * (features > 0)`.

Computes rectified linear gradients for a Relu operation.

dilation2DBackpropInput Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

-> Tensor Value t

in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

Computes the gradient of morphological 2-D dilation with respect to the input.

maxPoolGrad Source

Arguments

:: (TensorType t, OneOf `[Word16, Float]` t) 
=> Tensor v1 t

orig_input: The original input tensor.

-> Tensor v2 t

orig_output: The original output tensor.

-> Tensor v3 t

grad: 4-D. Gradients w.r.t. the output of max_pool.

-> Tensor Value t

output: Gradients w.r.t. the input to max_pool.

Computes gradients of the maxpooling function.

lRNGrad Source

Arguments

:: (TensorType t, OneOf `[Word16, Float]` t) 
=> Tensor v1 t

input_grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 t

input_image: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v3 t

output_image: 4-D with shape `[batch, height, width, channels]`.

-> Tensor Value t

output: The gradients for LRN.

Gradients for Local Response Normalization.

maxPool3DGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Float

orig_input: The original input tensor.

-> Tensor v2 Float

orig_output: The original output tensor.

-> Tensor v3 t

grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

-> Tensor Value t

output

Computes gradients of max pooling function.

conv3DBackpropFilterV2 Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` tensor.

-> Tensor v3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Value t

output

Computes the gradients of 3-D convolution with respect to the filter.

conv3DBackpropFilter Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Value t

output

Computes the gradients of 3-D convolution with respect to the filter.

conv3D Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

-> Tensor v2 t

filter: Shape `[filter_depth, filter_height, filter_width, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor Value t

output

Computes a 3-D convolution given 5-D input and filter tensors.

In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product.

Our Conv3D implements a form of cross-correlation.

depthwiseConv2dNativeBackpropFilter Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 4-D `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Value t

output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the filter input of the convolution.

Computes the gradients of depthwise convolution with respect to the filter.

conv2DBackpropFilter Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Value t

output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the filter input of the convolution.

Computes the gradients of convolution with respect to the filter.

conv2DBackpropInput Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 Int32

input_sizes: An integer vector representing the shape of input, where input is a 4-D `[batch, height, width, channels]` tensor.

-> Tensor v2 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Value t

output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.

Computes the gradients of convolution with respect to the input.

conv2D Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

input

-> Tensor v2 t

filter

-> Tensor Value t

output

Computes a 2-D convolution given 4-D input and filter tensors.

Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following:

  1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch vector.

In detail, with the default NHWC format,

output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k]

Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
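
The formula above can be transcribed directly. Here is a pure-Haskell sketch (illustrative only; tensors passed as index functions, no padding handling) of a single output element:

```
-- One output element of the NHWC formula above; tensors are passed
-- as index functions, strides as (vertical, horizontal).
conv2dElem
    :: (Int -> Int -> Int -> Int -> Double)  -- input[b, h, w, q]
    -> (Int -> Int -> Int -> Int -> Double)  -- filter[di, dj, q, k]
    -> (Int, Int)                            -- (filter_height, filter_width)
    -> Int                                   -- in_channels
    -> (Int, Int)                            -- (strides[1], strides[2])
    -> (Int, Int, Int, Int)                  -- output index (b, i, j, k)
    -> Double
conv2dElem inp flt (fh, fw) inC (sH, sW) (b, i, j, k) =
    sum [ inp b (sH * i + di) (sW * j + dj) q * flt di dj q k
        | di <- [0 .. fh - 1], dj <- [0 .. fw - 1], q <- [0 .. inC - 1] ]
```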

biasAdd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

value: Any number of dimensions.

-> Tensor v2 t

bias: 1-D with size the last dimension of value.

-> Tensor Value t

output: Broadcasted sum of value and bias.

Adds bias to value.

This is a special case of `tf.add` where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.

fusedBatchNorm Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x: A 4D Tensor for input data.

-> Tensor v2 t

scale: A 1D Tensor for scaling factor, to scale the normalized x.

-> Tensor v3 t

offset: A 1D Tensor for offset, to shift to the normalized x.

-> Tensor v4 t

mean: A 1D Tensor for population mean. Used for inference only; must be empty for training.

-> Tensor v5 t

variance: A 1D Tensor for population variance. Used for inference only; must be empty for training.

-> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)

(y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)

  • y: A 4D Tensor for output data.
  • batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow to compute the running mean.
  • batch_variance: A 1D Tensor for the computed batch variance, to be used by TensorFlow to compute the running variance.
  • reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation.
  • reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation.

Batch normalization.

Note that the size of 4D Tensors is defined by either NHWC or NCHW. The size of 1D Tensors matches the dimension C of the 4D Tensors.

batchNormWithGlobalNormalizationGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Bool

scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v1 t

t: A 4D input Tensor.

-> Tensor v2 t

m: A 1D mean Tensor with size matching the last dimension of t.

-> Tensor v3 t

v: A 1D variance Tensor with size matching the last dimension of t.

-> Tensor v4 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor.

-> Tensor v5 t

backprop: 4D backprop Tensor.

-> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)

(dx, dm, dv, db, dg)

  • dx: 4D backprop tensor for input.
  • dm: 1D backprop tensor for mean.
  • dv: 1D backprop tensor for variance.
  • db: 1D backprop tensor for beta.
  • dg: 1D backprop tensor for gamma.

Gradients for batch normalization.

This op is deprecated. See `tf.nn.batch_normalization`.

batchNormWithGlobalNormalization'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Bool

scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v'1 t

t: A 4D input Tensor.

-> Tensor v'2 t

m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'3 t

v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'4 t

beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor.

-> Tensor v'5 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor.

-> Tensor Build t

result

batchNormWithGlobalNormalizationGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Bool

scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v'1 t

t: A 4D input Tensor.

-> Tensor v'2 t

m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'3 t

v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'4 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor.

-> Tensor v'5 t

backprop: 4D backprop Tensor.

-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

(dx, dm, dv, db, dg)

  • dx: 4D backprop tensor for input.
  • dm: 1D backprop tensor for mean.
  • dv: 1D backprop tensor for variance.
  • db: 1D backprop tensor for beta.
  • dg: 1D backprop tensor for gamma.

Gradients for batch normalization.

This op is deprecated. See `tf.nn.batch_normalization`.

batchFFT3D Source

Arguments

:: Tensor v1 (Complex Float)

input

-> Tensor Value (Complex Float)

output

batchIFFT2D Source

Arguments

:: Tensor v1 (Complex Float)

input

-> Tensor Value (Complex Float)

output

avgPool Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> Tensor Value t

output: The average pooled output tensor.

Performs average pooling on the input.

Each entry in output is the mean of the corresponding size ksize window in value.

batchFFT2D Source

Arguments

:: Tensor v1 (Complex Float)

input

-> Tensor Value (Complex Float)

output

batchFFT Source

Arguments

:: Tensor v1 (Complex Float)

input

-> Tensor Value (Complex Float)

output

requantizationRange Source

Arguments

:: (TensorType tinput, OneOf `[Int16, Int32, Word16, Word8]` tinput) 
=> Tensor v1 tinput

input

-> Tensor v2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v3 Float

input_max: The float value that the maximum quantized input value represents.

-> (Tensor Value Float, Tensor Value Float)

(output_min, output_max)

  • output_min: The computed min output.
  • output_max: The computed max output.

Given a quantized tensor described by (input, input_min, input_max), outputs a

range that covers the actual values present in that tensor. This op is typically used to produce the requested_output_min and requested_output_max for Requantize.

requantize Source

Arguments

:: (TensorType tinput, OneOf `[Int16, Int32, Word16, Word8]` tinput, TensorType out_type, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v1 tinput

input

-> Tensor v2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v3 Float

input_max: The float value that the maximum quantized input value represents.

-> Tensor v4 Float

requested_output_min: The float value that the minimum quantized output value represents.

-> Tensor v5 Float

requested_output_max: The float value that the maximum quantized output value represents.

-> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)

(output, output_min, output_max)

  • output
  • output_min: The requested_output_min value is copied into this output.
  • output_max: The requested_output_max value is copied into this output.

Convert the quantized input tensor into a lower-precision output, using the

output range specified with requested_output_min and requested_output_max.

input_min, input_max are scalar floats that specify the range for the float interpretation of the input data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.

quantizeDownAndShrinkRange Source

Arguments

:: (TensorType tinput, OneOf `[Int16, Int32, Word16, Word8]` tinput, TensorType out_type, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v1 tinput

input

-> Tensor v2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v3 Float

input_max: The float value that the maximum quantized input value represents.

-> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)

(output, output_min, output_max)

  • output
  • output_min: The float value that the minimum quantized output value represents.
  • output_max: The float value that the maximum quantized output value represents.

Convert the quantized input tensor into a lower-precision output, using the

actual distribution of the values to maximize the usage of the lower bit depth and adjusting the output min and max ranges accordingly.

input_min, input_max are scalar floats that specify the range for the float interpretation of the input data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.

This operator tries to squeeze as much precision as possible into an output with a lower bit depth by calculating the actual min and max values found in the data. For example, maybe that quint16 input has no values lower than 16,384 and none higher than 49,152. That means only half the range is actually needed, all the float interpretations are between -0.5f and 0.5f, so if we want to compress the data into a quint8 output, we can use that range rather than the theoretical -1.0f to 1.0f that is suggested by the input min and max.

In practice, this is most useful for taking output from operations like QuantizedMatMul that can produce higher bit-depth outputs than their inputs and may have large potential output ranges, but in practice have a distribution of input values that only uses a small fraction of the possible range. By feeding that output into this operator, we can reduce it from 32 bits down to 8 with minimal loss of accuracy.
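
The float interpretation described above is a simple linear map from the quantized integer range onto `[input_min, input_max]`. A pure-Haskell sketch for quint16 (illustrative only):

```
import Data.Word (Word16)

-- Linear map from the quint16 range [0, 65535] onto [inputMin, inputMax].
dequantize16 :: Float -> Float -> Word16 -> Float
dequantize16 inputMin inputMax q =
    inputMin + (inputMax - inputMin) * fromIntegral q / 65535

-- dequantize16 (-1) 1 0     == -1.0
-- dequantize16 (-1) 1 65535 ==  1.0
```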

quantizedMatMul Source

Arguments

:: (TensorType t1, OneOf `[Int16, Int32, Word16, Word8]` t1, TensorType t2, OneOf `[Int16, Int32, Word16, Word8]` t2, TensorType toutput, OneOf `[Int16, Int32, Word16, Word8]` toutput) 
=> Tensor v1 t1

a: Must be a two-dimensional tensor.

-> Tensor v2 t2

b: Must be a two-dimensional tensor.

-> Tensor v3 Float

min_a: The float value that the lowest quantized a value represents.

-> Tensor v4 Float

max_a: The float value that the highest quantized a value represents.

-> Tensor v5 Float

min_b: The float value that the lowest quantized b value represents.

-> Tensor v6 Float

max_b: The float value that the highest quantized b value represents.

-> (Tensor Value toutput, Tensor Value Float, Tensor Value Float)

(out, min_out, max_out)

  • out
  • min_out: The float value that the lowest quantized output value represents.
  • max_out: The float value that the highest quantized output value represents.

Perform a quantized matrix multiplication of a by the matrix b.

The inputs must be two-dimensional matrices and the inner dimension of a (after being transposed if transpose_a is non-zero) must match the outer dimension of b (after being transposed if transpose_b is non-zero).

cumprod Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

x

-> Tensor v2 tidx

axis

-> Tensor Value t

out

Compute the cumulative product of the tensor x along axis.

By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output:
```prettyprint
tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
```

By setting the exclusive kwarg to True, an exclusive cumprod is performed instead:
```prettyprint
tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
```

By setting the reverse kwarg to True, the cumprod is performed in the opposite direction:
```prettyprint
tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.

The reverse and exclusive kwargs can also be combined:
```prettyprint
tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
```

cumsum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

x

-> Tensor v2 tidx

axis

-> Tensor Value t

out

Compute the cumulative sum of the tensor x along axis.

By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output:
```prettyprint
tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
```

By setting the exclusive kwarg to True, an exclusive cumsum is performed instead:
```prettyprint
tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
```

By setting the reverse kwarg to True, the cumsum is performed in the opposite direction:
```prettyprint
tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.

The reverse and exclusive kwargs can also be combined:
```prettyprint
tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
```
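
The cumprod and cumsum variants above map directly onto list scans. A pure-Haskell sketch of the cumsum cases (illustrative, not the TF kernels):

```
cumsumInclusive, cumsumExclusive, cumsumReverse :: Num a => [a] -> [a]
cumsumInclusive = scanl1 (+)                     -- [a, a+b, a+b+c]
cumsumExclusive = init . scanl (+) 0             -- [0, a, a+b]
cumsumReverse   = reverse . scanl1 (+) . reverse -- [a+b+c, b+c, c]
```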

cross Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

a: A tensor containing 3-element vectors.

-> Tensor v2 t

b: Another tensor, of same type and shape as a.

-> Tensor Value t

product: Pairwise cross product of the vectors in a and b.

Compute the pairwise cross product.

a and b must be the same shape; they can either be simple 3-element vectors, or any shape where the innermost dimension is 3. In the latter case, each pair of corresponding 3-element vectors is cross-multiplied independently.

iFFT3D Source

Arguments

:: Tensor v1 (Complex Float)

input: A complex64 tensor.

-> Tensor Value (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 3 dimensions of input are replaced with their inverse 3D Fourier Transform.

NumPy compatibility: equivalent to np.fft3.

Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most

3 dimensions of input.

fFT3D Source

Arguments

:: Tensor v1 (Complex Float)

input: A complex64 tensor.

-> Tensor Value (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 3 dimensions of input are replaced with their 3D Fourier Transform.

NumPy compatibility: equivalent to np.fft3.

Compute the 3-dimensional discrete Fourier Transform over the inner-most 3

dimensions of input.

maxPoolGradWithArgmax Source

Arguments

:: (TensorType targmax, OneOf `[Int32, Int64]` targmax, TensorType t, OneOf `[Word16, Float]` t) 
=> Tensor v1 t

input: The original input.

-> Tensor v2 t

grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of max_pool.

-> Tensor v3 targmax

argmax: The indices of the maximum values chosen for each output of max_pool.

-> Tensor Value t

output: Gradients w.r.t. the input of max_pool.

Computes gradients of the maxpooling function.

fFT2D Source

Arguments

:: Tensor v1 (Complex Float)

input: A complex64 tensor.

-> Tensor Value (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 2 dimensions of input are replaced with their 2D Fourier Transform.

NumPy compatibility: equivalent to np.fft2.

Compute the 2-dimensional discrete Fourier Transform over the inner-most

2 dimensions of input.

iFFT Source

Arguments

:: Tensor v1 (Complex Float)

input: A complex64 tensor.

-> Tensor Value (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most dimension of input is replaced with its inverse 1D Fourier Transform.

Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most

dimension of input.

fFT Source

Arguments

:: Tensor v1 (Complex Float)

input: A complex64 tensor.

-> Tensor Value (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most dimension of input is replaced with its 1D Fourier Transform.

Compute the 1-dimensional discrete Fourier Transform over the inner-most

dimension of input.

conj Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float]` t) 
=> Tensor v1 t

input

-> Tensor Value t

output

Returns the complex conjugate of a complex number.

Given a tensor input of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in input. The complex numbers in input must be of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part.

The complex conjugate returned by this operation is of the form \(a - bj\).

For example:

```
# tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
```

real Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float]` t, TensorType tout, OneOf `[Double, Float]` tout) 
=> Tensor v1 t

input

-> Tensor Value tout

output

Returns the real part of a complex number.

Given a tensor input of complex numbers, this operation returns a tensor of type float that is the real part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part returned by this operation and *b* is the imaginary part.

For example:

```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```

complex Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t, TensorType tout, OneOf `[Complex Double, Complex Float]` tout) 
=> Tensor v1 t

real

-> Tensor v2 t

imag

-> Tensor Value tout

out

Converts two real numbers to a complex number.

Given a tensor real representing the real part of a complex number, and a tensor imag representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \(a + bj\), where *a* represents the real part and *b* represents the imag part.

The input tensors real and imag must have the same shape.

For example:

```
# tensor 'real' is [2.25, 3.25]
# tensor 'imag' is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```

range Source

Arguments

:: (TensorType tidx, OneOf `[Int32, Int64, Double, Float]` tidx) 
=> Tensor v1 tidx

start: 0-D (scalar). First entry in the sequence.

-> Tensor v2 tidx

limit: 0-D (scalar). Upper limit of sequence, exclusive.

-> Tensor v3 tidx

delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

-> Tensor Value tidx

output: 1-D.

Creates a sequence of numbers.

This operation creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.

For example:

```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
```
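The same sequence can be produced from the Haskell binding; this is a minimal sketch (not from the docs) assuming the `scalar` helper from TensorFlow.Ops:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as Ops

-- range with start=3, limit=18, delta=3, mirroring the example above.
main :: IO ()
main = do
  result <- TF.runSession $
    TF.run $ Ops.range (TF.scalar (3 :: Int32)) (TF.scalar 18) (TF.scalar 3)
  print (result :: V.Vector Int32)  -- expected: [3,6,9,12,15]
```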

any Source

Arguments

:: (TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 Bool

input: The tensor to reduce.

-> Tensor v2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Value Bool

output: The reduced tensor.

Computes the "logical or" of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
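As a sketch of the reduction semantics (assuming Bool constants are accepted by the `constant` helper from TensorFlow.Ops), reducing a `[2, 2]` Bool matrix along dimension 1 yields one Bool per row:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as Ops

-- "logical or" across the columns of [[True, False], [False, False]].
main :: IO ()
main = do
  result <- TF.runSession $ do
    let input = TF.constant (TF.Shape [2, 2]) [True, False, False, False]
    TF.run $ Ops.any input (TF.vector [1 :: Int32])  -- reduction_indices
  print (result :: V.Vector Bool)  -- expected: [True, False]
```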

sparseSegmentMean Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

data

-> Tensor v2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the mean along sparse segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Like SegmentMean, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

sparseSegmentSum Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

data

-> Tensor v2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the sum along sparse segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Like SegmentSum, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

For example:

```prettyprint
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

# Select two rows, one segment.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  ==> [[0 0 0 0]]

# Select two rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  ==> [[ 1  2  3  4]
       [-1 -2 -3 -4]]

# Select all rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  ==> [[0 0 0 0]
       [5 6 7 8]]

# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
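The first case above translates directly to the Haskell binding; a minimal sketch, assuming the `constant`/`vector` helpers from TensorFlow.Ops:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as Ops

-- Select two rows, one segment: rows 0 and 1 are summed into segment 0.
main :: IO ()
main = do
  result <- TF.runSession $ do
    let c = TF.constant (TF.Shape [3, 4])
              [1, 2, 3, 4, -1, -2, -3, -4, 5, 6, 7, 8 :: Float]
    TF.run $ Ops.sparseSegmentSum c
               (TF.vector [0, 1 :: Int32])  -- indices
               (TF.vector [0, 0 :: Int32])  -- segment_ids
  print (result :: V.Vector Float)  -- expected: [0,0,0,0]
```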

unsortedSegmentSum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 t

data

-> Tensor v2 tindices

segment_ids: A tensor whose shape is a prefix of `data.shape`.

-> Tensor v3 Int32

num_segments

-> Tensor Value t

output: Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size num_segments.

Computes the sum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that `output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such that `segment_ids[j...] == i`. Unlike SegmentSum, segment_ids need not be sorted and need not cover all values in the full range of valid values.

If the sum is empty for a given segment ID i, `output[i] = 0`.

num_segments should equal the number of distinct segment IDs.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/UnsortedSegmentSum.png" alt - /div

segmentMin Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 t

data

-> Tensor v2 tindices

segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the minimum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \min_j(data_j)\) where min is over j such that `segment_ids[j] == i`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentMin.png" alt - /div

segmentProd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 t

data

-> Tensor v2 tindices

segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the product along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \prod_j data_j\) where the product is over j such that `segment_ids[j] == i`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentProd.png" alt - /div

segmentMean Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 t

data

-> Tensor v2 tindices

segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the mean along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \frac{\sum_j data_j}{N}\) where the mean is over j such that `segment_ids[j] == i` and N is the total number of values summed.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentMean.png" alt - /div

segmentSum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 t

data

-> Tensor v2 tindices

segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the sum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \sum_j data_j\) where the sum is over j such that `segment_ids[j] == i`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/SegmentSum.png" alt - /div

argMin Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

input

-> Tensor v2 tidx

dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.

-> Tensor Value Int64

output

Returns the index with the smallest value across dimensions of a tensor.

max Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

input: The tensor to reduce.

-> Tensor v2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Value t

output: The reduced tensor.

Computes the maximum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

min Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

input: The tensor to reduce.

-> Tensor v2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Value t

output: The reduced tensor.

Computes the minimum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

prod Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

input: The tensor to reduce.

-> Tensor v2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Value t

output: The reduced tensor.

Computes the product of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

sum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

input: The tensor to reduce.

-> Tensor v2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Value t

output: The reduced tensor.

Computes the sum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

sparseMatMul Source

Arguments

:: (TensorType ta, OneOf `[Word16, Float]` ta, TensorType tb, OneOf `[Word16, Float]` tb) 
=> Tensor v1 ta

a

-> Tensor v2 tb

b

-> Tensor Value Float

product

Multiply matrix "a" by matrix "b".

The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". This op is optimized for the case where at least one of "a" or "b" is sparse. The breakeven for using this versus a dense matrix multiply on one platform was 30% zero values in the sparse matrix.

matMul Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t) 
=> Tensor v1 t

a

-> Tensor v2 t

b

-> Tensor Value t

product

Multiply the matrix "a" by the matrix "b".

The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transpose_b is true).

*Note*: The default kernel implementation for MatMul on GPUs uses cublas.
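A minimal sketch of calling the binding (assuming the `constant` helper from TensorFlow.Ops; results come back flattened row-major):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as Ops

-- [[1,2],[3,4]] x [[5,6],[7,8]] = [[19,22],[43,50]].
main :: IO ()
main = do
  result <- TF.runSession $ do
    let a = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
        b = TF.constant (TF.Shape [2, 2]) [5, 6, 7, 8 :: Float]
    TF.run (Ops.matMul a b)
  print (result :: V.Vector Float)  -- expected: [19,22,43,50]
```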

logicalAnd Source

Arguments

:: Tensor v1 Bool

x

-> Tensor v2 Bool

y

-> Tensor Value Bool

z

Returns the truth value of x AND y element-wise.

*NOTE*: LogicalAnd supports broadcasting. More about broadcasting here.

equal Source

Returns the truth value of (x == y) element-wise.

*NOTE*: Equal supports broadcasting. More about broadcasting here.

greaterEqual Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value Bool

z

Returns the truth value of (x >= y) element-wise.

*NOTE*: GreaterEqual supports broadcasting. More about broadcasting here.

lessEqual Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value Bool

z

Returns the truth value of (x <= y) element-wise.

*NOTE*: LessEqual supports broadcasting. More about broadcasting here.

less Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value Bool

z

Returns the truth value of (x < y) element-wise.

*NOTE*: Less supports broadcasting. More about broadcasting here.

polygamma Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

a

-> Tensor v2 t

x

-> Tensor Value t

z

Compute the polygamma function \(\psi^{(n)}(x)\).

The polygamma function is defined as:

```
\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)
```

where \(\psi(x)\) is the digamma function.

igamma Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

a

-> Tensor v2 t

x

-> Tensor Value t

z

Compute the lower regularized incomplete Gamma function `P(a, x)`.

The lower regularized incomplete Gamma function is defined as:

```
P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
```

where

```
gamma(a, x) = \int_{0}^{x} t^{a-1} \exp(-t) dt
```

is the lower incomplete Gamma function.

Note, above `Q(a, x)` (Igammac) is the upper regularized incomplete Gamma function.

igammac Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

a

-> Tensor v2 t

x

-> Tensor Value t

z

Compute the upper regularized incomplete Gamma function `Q(a, x)`.

The upper regularized incomplete Gamma function is defined as:

```
Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)
```

where

```
Gamma(a, x) = \int_{x}^{\infty} t^{a-1} \exp(-t) dt
```

is the upper incomplete Gamma function.

Note, above `P(a, x)` (Igamma) is the lower regularized incomplete Gamma function.

mod Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns element-wise remainder of division.

*NOTE*: Mod supports broadcasting. More about broadcasting here.

minimum Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns the min of x and y (i.e. x < y ? x : y) element-wise.

*NOTE*: Minimum supports broadcasting. More about broadcasting here.

maximum Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns the max of x and y (i.e. x > y ? x : y) element-wise.

*NOTE*: Maximum supports broadcasting. More about broadcasting here.

squaredDifference Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns (x - y)(x - y) element-wise.

*NOTE*: SquaredDifference supports broadcasting. More about broadcasting here.

softplusGrad Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

gradients: The backpropagated gradients to the corresponding softplus operation.

-> Tensor v2 t

features: The features passed as input to the corresponding softplus operation.

-> Tensor Value t

backprops: The gradients: `gradients / (1 + exp(-features))`.

Computes softplus gradients for a softplus operation.
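For reference, the backprop factor above is just the derivative of \(softplus(x) = \log(1 + e^{x})\):

```
\frac{d}{dx} \log(1 + e^{x}) = \frac{e^{x}}{1 + e^{x}} = \frac{1}{1 + e^{-x}}
```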

batchNormWithGlobalNormalizationGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Bool

scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v'1 t

t: A 4D input Tensor.

-> Tensor v'2 t

m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'3 t

v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'4 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor.

-> Tensor v'5 t

backprop: 4D backprop Tensor.

-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

(dx, dm, dv, db, dg)

  • dx: 4D backprop tensor for input.
  • dm: 1D backprop tensor for mean.
  • dv: 1D backprop tensor for variance.
  • db: 1D backprop tensor for beta.
  • dg: 1D backprop tensor for gamma.

Gradients for batch normalization.

This op is deprecated. See `tf.nn.batch_normalization`.

batchNormWithGlobalNormalizationGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Bool

scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v'1 t

t: A 4D input Tensor.

-> Tensor v'2 t

m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'3 t

v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.

-> Tensor v'4 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor.

-> Tensor v'5 t

backprop: 4D backprop Tensor.

-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

(dx, dm, dv, db, dg)

  • dx: 4D backprop tensor for input.
  • dm: 1D backprop tensor for mean.
  • dv: 1D backprop tensor for variance.
  • db: 1D backprop tensor for beta.
  • dg: 1D backprop tensor for gamma.

batchSelfAdjointEig

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input

-> Tensor Build t

output

batchSelfAdjointEig'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

batchSelfAdjointEigV2

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input

-> (Tensor Build t, Tensor Build t)

(e, v)

  • e
  • v

batchSelfAdjointEigV2'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> (Tensor Build t, Tensor Build t)

(e, v)

  • e
  • v

batchSvd

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> Tensor v'1 t

input

-> (Tensor Build t, Tensor Build t, Tensor Build t)

(s, u, v)

  • s
  • u
  • v

batchSvd'

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> (Tensor Build t, Tensor Build t, Tensor Build t)

(s, u, v)

  • s
  • u
  • v

batchToSpace

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tidx) 
=> Int64

block_size

-> Tensor v'1 t

input: 4-D tensor with shape `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. Note that the batch size of the input tensor must be divisible by `block_size * block_size`.

-> Tensor v'2 tidx

crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial dimensions as follows:

crops = [[crop_top, crop_bottom], [crop_left, crop_right]]

-> Tensor Build t

output: 4-D with shape `[batch, height, width, depth]`, where:

height = height_pad - crop_top - crop_bottom
width = width_pad - crop_left - crop_right

The attr block_size must be greater than one. It indicates the block size.

Some examples:

  1. For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

```prettyprint
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```prettyprint
x = [[[[1], [2]], [[3], [4]]]]
```

BatchToSpace for 4-D tensors of type T.

Rearranges (permutes) data from batch into blocks of spatial data, followed by cropping. This is the reverse transformation of SpaceToBatch. More specifically, this op outputs a copy of the input tensor where values from the batch dimension are moved in spatial blocks to the height and width dimensions, followed by cropping along the height and width dimensions.

mul Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns x * y element-wise.

*NOTE*: Mul supports broadcasting. More about broadcasting here.

rint Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Returns element-wise integer closest to x.

If the result is midway between two representable values, the even representable is chosen. For example:

```
rint(-1.5) ==> -2.0
rint(0.5000001) ==> 1.0
rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
```

ceil Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Returns element-wise smallest integer not less than x.

floor Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Returns element-wise largest integer not greater than x.

maxPool3D Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

-> Tensor Value t

output: The max pooled output tensor.

Performs 3D max pooling on the input.

isInf Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value Bool

y

Returns which elements of x are Inf.

Compatibility with numpy: equivalent to `np.isinf`.

depthwiseConv2dNativeBackpropInput Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 Int32

input_sizes: An integer vector representing the shape of input, where input is a 4-D `[batch, height, width, channels]` tensor.

-> Tensor v2 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Value t

output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.

Computes the gradients of depthwise convolution with respect to the input.

isNan Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value Bool

y

Returns which elements of x are NaN.

Compatibility with numpy: equivalent to `np.isnan`.

log1p Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes natural logarithm of (1 + x) element-wise.

I.e., \(y = \log_e (1 + x)\).

asin Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes asin of x element-wise.

topKV2 Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: 1-D or higher with last dimension at least k.

-> Tensor v2 Int32

k: 0-D. Number of top elements to look for along the last dimension (along each row for matrices).

-> (Tensor Value t, Tensor Value Int32)

(values, indices)

  • values: The k largest elements along each last dimensional slice.
  • indices: The indices of values within the last dimension of input.

Finds values and indices of the k largest elements for the last dimension.

If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

For matrices (resp. higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,

values.shape = indices.shape = input.shape[:-1] + [k]

If two elements are equal, the lower-index element appears first.

This is the same as TopK, but takes k as an input rather than an attr.
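Both outputs can be fetched together; a minimal sketch (assuming the `vector`/`scalar` helpers from TensorFlow.Ops and tuple fetching from TensorFlow.Core):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as Ops

-- Top 2 entries of a rank-1 input, with their positions.
main :: IO ()
main = do
  (values, indices) <- TF.runSession $ do
    let input = TF.vector [1, 4, 2, 3 :: Float]
    TF.run $ Ops.topKV2 input (TF.scalar (2 :: Int32))
  print (values  :: V.Vector Float)  -- expected: [4.0, 3.0]
  print (indices :: V.Vector Int32)  -- expected: [1, 3]
```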

cos Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes cos of x element-wise.

sin Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes sin of x element-wise.

randomUniformInt Source

Arguments

:: (TensorType tout, OneOf `[Int32, Int64]` tout, TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 t

shape: The shape of the output tensor.

-> Tensor v2 tout

minval: 0-D. Inclusive lower bound on the generated integers.

-> Tensor v3 tout

maxval: 0-D. Exclusive upper bound on the generated integers.

-> Build (Tensor Value tout)

output: A tensor of the specified shape filled with uniform random integers.

Outputs random integers from a uniform distribution.

The generated values are uniform integers in the range `[minval, maxval)`. The lower bound minval is included in the range, while the upper bound maxval is excluded.

The random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2^32` or `2^64`).
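Because the result type here is `Build (Tensor Value tout)`, the op has to be rendered inside the graph-building monad before it can be fetched; a minimal sketch, assuming `build` is exported by TensorFlow.Core:

```haskell
import Data.Int (Int32, Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as Ops

-- Draw eight integers uniformly from [0, 10).
main :: IO ()
main = do
  result <- TF.runSession $ do
    samples <- TF.build $ Ops.randomUniformInt
                 (TF.vector [8 :: Int64])  -- shape of the output
                 (TF.scalar (0 :: Int32))  -- minval, inclusive
                 (TF.scalar 10)            -- maxval, exclusive
    TF.run samples
  print (result :: V.Vector Int32)
```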

erfc Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes the complementary error function of x element-wise.

digamma Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes Psi, the derivative of Lgamma (the log of the absolute value of `Gamma(x)`), element-wise.

fusedResizeAndPadConv2D Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor v3 Int32

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

-> Tensor v4 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor Value t

output

Performs a resize and padding as a preprocess during a convolution.

It's often possible to do spatial transformations more efficiently as part of the packing stage of a convolution, so this op allows for an optimized implementation where these stages are fused together. This prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations. The data_format attribute for Conv2D isn't supported by this op, and defaults to NHWC order.

Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage.

sub Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns x - y element-wise.

*NOTE*: Sub supports broadcasting. More about broadcasting here.

sign Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Returns an element-wise indication of the sign of a number.

`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

lgamma Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes the log of the absolute value of `Gamma(x)` element-wise.

log Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes natural logarithm of x element-wise.

I.e., \(y = \log_e x\).

exp Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes exponential of x element-wise. \(y = e^x\).

dilation2D Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor Value t

output: 4-D with shape `[batch, out_height, out_width, depth]`.

Computes the grayscale dilation of 4-D input and 3-D filter tensors.

The input tensor has shape `[batch, in_height, in_width, depth]` and the filter tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The output tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the padding algorithm. We currently only support the default NHWC data_format.

In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with conv2d, we use unmirrored filters):

```
output[b, y, x, c] =
    max_{dy, dx} input[b,
                       strides[1] * y + rates[1] * dy,
                       strides[2] * x + rates[2] * dx,
                       c] +
                 filter[dy, dx, c]
```

Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros.

Note on duality: The dilation of input by the filter is equal to the negation of the erosion of `-input` by the reflected filter.

rsqrtGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Computes the gradient for the rsqrt of x wrt its input.

Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and dy is the corresponding input gradient.
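For reference, this factor is exactly what differentiating \(y = x^{-1/2}\) gives:

```
\frac{dy}{dx} = -\frac{1}{2} x^{-3/2} = -\frac{1}{2} y^{3}
```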

rsqrt Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes reciprocal of square root of x element-wise.

I.e., \(y = 1 / \sqrt{x}\).

quantizedMaxPool Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Word16, Word8]` t) 
=> Tensor v1 t

input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.

-> Tensor v2 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v3 Float

max_input: The float value that the highest quantized input value represents.

-> (Tensor Value t, Tensor Value Float, Tensor Value Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

Produces the max pool of the input tensor for quantized types.

sqrt Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes square root of x element-wise.

I.e., \(y = \sqrt{x} = x^{1/2}\).

identityReader Source

Arguments

:: Build (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the queued work as both the key and value.

To use, enqueue strings in a Queue. ReaderRead will take the front work string and output (work, work).

square Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes square of x element-wise.

I.e., \(y = x * x = x^2\).

quantizedReshape Source

Arguments

:: (TensorType t, TensorType tshape, OneOf `[Int32, Int64]` tshape) 
=> Tensor v1 t

tensor

-> Tensor v2 tshape

shape: Defines the shape of the output tensor.

-> Tensor v3 Float

input_min: The minimum value of the input.

-> Tensor v4 Float

input_max: The maximum value of the input.

-> (Tensor Value t, Tensor Value Float, Tensor Value Float)

(output, output_min, output_max)

  • output
  • output_min: This value is copied from input_min.
  • output_max: This value is copied from input_max.

Reshapes a quantized tensor as per the Reshape op.


reciprocalGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Computes the gradient for the inverse of x wrt its input.

Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is the corresponding input gradient.
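For reference, this follows from differentiating \(y = x^{-1}\):

```
\frac{dy}{dx} = -x^{-2} = -y^{2}
```

so by the chain rule the input gradient is `dy * (-y^2) = -dy * y * y`.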

invGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Computes the gradient for the inverse of x wrt its input.

Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is the corresponding input gradient.

inv Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes the reciprocal of x element-wise.

I.e., \(y = 1 / x\).

tensorArrayConcatV2 Source

Arguments

:: TensorType dtype 
=> Tensor v1 ByteString

handle: The handle to a TensorArray.

-> Tensor v2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> (Tensor Value dtype, Tensor Value Int64)

(value, lengths)

  • value: All of the elements in the TensorArray, concatenated along the first axis.
  • lengths: A vector of the row sizes of the original T elements in the value output. In the example above, this would be the values: `(n1, n2, ..., n(T-1))`.

Concat the elements from the TensorArray into value `value`.

Takes T elements of shapes

```
(n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
```

and concatenates them into a Tensor of shape:

```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```

All elements must have the same shape (excepting the first dimension).

complexAbs Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float]` t, TensorType tout, OneOf `[Double, Float]` tout) 
=> Tensor v1 t

x

-> Tensor Value tout

y

Computes the complex absolute value of a tensor.

Given a tensor x of complex numbers, this operation returns a tensor of type float or double that is the absolute value of each element in x. All elements in x must be complex numbers of the form \(a + bj\). The absolute value is computed as \(\sqrt{a^2 + b^2}\).

For example:

```
# tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
tf.complex_abs(x) ==> [5.25594902, 6.60492229]
```

_HostCast Source

Arguments

:: (TensorType srcT, TensorType dstT) 
=> Tensor v1 srcT

x

-> Tensor Value dstT

y

Cast x of type SrcT to y of DstT.

_HostCast requires its input and produces its output in host memory.

resizeNearestNeighbor Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Value t

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

Resize images to size using nearest neighbor interpolation.

adjustContrast Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
=> Tensor v1 t

images

-> Tensor v2 Float

contrast_factor

-> Tensor v3 Float

min_value

-> Tensor v4 Float

max_value

-> Tensor Value Float

output

Deprecated. Disallowed in GraphDef version >= 2.

batchMatrixDiagPart Source

Arguments

:: TensorType t 
=> Tensor v1 t

input

-> Tensor Value t

diagonal

batchMatrixSetDiag Source

Arguments

:: TensorType t 
=> Tensor v1 t

input

-> Tensor v2 t

diagonal

-> Tensor Value t

output

batchMatrixDiag Source

Arguments

:: TensorType t 
=> Tensor v1 t

diagonal

-> Tensor Value t

output

fakeQuantWithMinMaxVarsPerChannelGradient Source

Arguments

:: Tensor v1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.

-> Tensor v2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape same as gradients. min, max: Quantization interval, floats of shape `[d]`.

-> Tensor v3 Float

min

-> Tensor v4 Float

max

-> (Tensor Value Float, Tensor Value Float, Tensor Value Float)

(backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

  • backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as inputs: `gradients * (inputs >= min && inputs <= max)`.
  • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: `sum_per_d(gradients * (inputs < min))`.
  • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`: `sum_per_d(gradients * (inputs > max))`.

Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.

sparseSegmentSqrtNGrad Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

grad: gradient propagated to the SparseSegmentSqrtN op.

-> Tensor v2 tidx

indices: indices passed to the corresponding SparseSegmentSqrtN op.

-> Tensor v3 Int32

segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

-> Tensor v4 Int32

output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

-> Tensor Value t

output

Computes gradients for SparseSegmentSqrtN.

Returns tensor "output" with same shape as grad, except for dimension 0 whose - value is output_dim0.

fakeQuantWithMinMaxVarsPerChannel Source

Arguments

:: Tensor v1 Float

inputs

-> Tensor v2 Float

min

-> Tensor v3 Float

max

-> Tensor Value Float

outputs

Fake-quantize the inputs tensor of type float and one of the shapes `[d]`, `[b, d]`, `[b, h, w, d]` via per-channel floats min and max of shape `[d]` to outputs tensor of same shape as inputs.

`[min; max]` is the clamping range for the inputs data in the corresponding depth channel. Op divides this range into 255 steps (total of 256 values), then

This operation has a gradient and thus allows for training min and max values.

scalarSummary Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 ByteString

tags: Tags for the summary.

-> Tensor v2 t

values: Same shape as `tags`. Values for the summary.

-> Tensor Value ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with scalar values.

The input tags and values must have the same shape. The generated summary has a summary value for each tag-value pair in tags and values.

neg Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes numerical negative value element-wise.

I.e., \(y = -x\).

fakeQuantWithMinMaxArgsGradient Source

Arguments

:: Tensor v1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.

-> Tensor v2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.

-> Tensor Value Float

backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: `gradients * (inputs >= min && inputs <= max)`.

Compute gradients for a FakeQuantWithMinMaxArgs operation.

debugNanCount Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: Input tensor, non-Reference type.

-> Tensor Value Int64

output: An integer output tensor that is the number of NaNs in the input.

Debug NaN Value Counter Op

Counts number of NaNs in the input tensor, for debugging.

debugIdentity Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: Input tensor, non-Reference type.

-> Tensor Value t

output: Output tensor that equals the input tensor.

Debug Identity Op.

Provides an identity mapping of the non-Ref type input tensor for debugging.

bitcast Source

Bitcasts a tensor from one type to another without copying data.

Given a tensor input, this operation returns a tensor that has the same buffer data as input with datatype `type`.

If the input datatype T is larger than the output datatype `type` then the shape changes from [...] to [..., sizeof(T)/sizeof(`type`)].

If T is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(T). The shape then goes from [..., sizeof(`type`)/sizeof(T)] to [...].

*NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results.

sigmoid Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes sigmoid of x element-wise.

Specifically, `y = 1 / (1 + exp(-x))`.

copy Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: Input tensor.

-> Tensor Value t

output: Output tensor, deep-copied from input.

Copy Op.

Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated.

Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output.

fixedUnigramCandidateSampler Source

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

Generates labels for candidate sampling with a learned unigram distribution.

A unigram sampler could use a fixed unigram distribution read from a file or passed in as an in-memory array instead of building up the distribution from data on the fly. There is also an option to skew the distribution by applying a distortion power to the weights.

The vocabulary file should be in CSV-like format, with the last field being the weight associated with the word.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

listDiff Source

Arguments

:: (TensorType t, TensorType out_idx, OneOf `[Int32, Int64]` out_idx) 
=> Tensor v1 t

x: 1-D. Values to keep.

-> Tensor v2 t

y: 1-D. Values to remove.

-> (Tensor Value t, Tensor Value out_idx)

(out, idx)

  • out: 1-D. Values present in x but not in y.
  • idx: 1-D. Positions of x values preserved in out.

Computes the difference between two lists of numbers or strings.

Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). This operation also returns a list idx that represents the position of each out element in x. In other words:

`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`

For example, given this input:

```prettyprint
x = [1, 2, 3, 4, 5, 6]
y = [1, 3, 5]
```

This operation would return:

```prettyprint
out ==> [2, 4, 6]
idx ==> [1, 3, 5]
```
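The same data runs through the Haskell binding as below; a minimal sketch (assuming the `vector` helper from TensorFlow.Ops and tuple fetching from TensorFlow.Core):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as Ops

-- Keep [1..6], remove [1,3,5]; out_idx is fixed to Int32 by the annotation.
main :: IO ()
main = do
  (out, idx) <- TF.runSession $
    TF.run $ Ops.listDiff
               (TF.vector [1, 2, 3, 4, 5, 6 :: Float])
               (TF.vector [1, 3, 5 :: Float])
  print (out :: V.Vector Float)  -- expected: [2.0, 4.0, 6.0]
  print (idx :: V.Vector Int32)  -- expected: [1, 3, 5]
```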

extractImagePatches Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

-> Tensor Value t

patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image patches with size `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.

Extract patches from images and put them in the "depth" output dimension.

spaceToDepth Source

Arguments

:: TensorType t 
=> Int64

block_size: The size of the spatial block.

-> Tensor v1 t

input

-> Tensor Value t

output

SpaceToDepth for tensors of type T.

Rearranges blocks of spatial data, into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. The attr block_size indicates the input block size and how the data is moved.

  • Non-overlapping blocks of size `block_size x block_size` are rearranged into depth at each location.
  • The depth of the output tensor is `input_depth * block_size * block_size`.
  • The input tensor's height and width must be divisible by block_size.

That is, assuming the input is in the shape `[batch, height, width, depth]`, the shape of the output will be `[batch, height/block_size, width/block_size, depth*block_size*block_size]`.

This operation requires that the input tensor be of rank 4, and that block_size be >= 1 and a divisor of both the input height and width.

This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.

For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:

```prettyprint
x = [[[[1], [2]],
      [[3], [4]]]]
```

This operation will output a tensor of shape `[1, 1, 1, 4]`:

```prettyprint
[[[[1, 2, 3, 4]]]]
```

Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, the corresponding output will have a single element (i.e. width and height are both 1) and will have a depth of 4 channels (1 * block_size * block_size). The output element shape is `[1, 1, 4]`.

For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.

```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

This operation, for block_size of 2, will return the following tensor of shape `[1, 1, 1, 12]`:

```prettyprint
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```

Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:

```prettyprint
x = [[[[1],  [2],  [5],  [6]],
      [[3],  [4],  [7],  [8]],
      [[9],  [10], [13], [14]],
      [[11], [12], [15], [16]]]]
```

the operator will return the following tensor of shape `[1 2 2 4]`:

```prettyprint
x = [[[[1, 2, 3, 4],
       [5, 6, 7, 8]],
      [[9, 10, 11, 12],
       [13, 14, 15, 16]]]]
```

batchToSpace'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Int64

block_size

-> Tensor v'1 t

input: 4-D tensor with shape `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. Note that the batch size of the input tensor must be divisible by `block_size * block_size`.

-> Tensor v'2 tidx

crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial dimensions as follows:

crops = [[crop_top, crop_bottom], [crop_left, crop_right]]

-> Tensor Build t

output: 4-D with shape `[batch, height, width, depth]`, where:

height = height_pad - crop_top - crop_bottom
width = width_pad - crop_left - crop_right

The attr block_size must be greater than one. It indicates the block size.

Some examples:

  1. For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

```prettyprint
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```prettyprint
x = [[[[1], [2]], [[3], [4]]]]
```

  2. For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

```prettyprint
[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

  3. For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

```prettyprint
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```prettyprint
x = [[[1],  [2],  [3],  [4]],
     [[5],  [6],  [7],  [8]],
     [[9],  [10], [11], [12]],
     [[13], [14], [15], [16]]]
```

  4. For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

```prettyprint
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```prettyprint
x = [[[[1], [2], [3], [4]],
      [[5], [6], [7], [8]]],
     [[[9], [10], [11], [12]],
      [[13], [14], [15], [16]]]]
```

cropAndResizeGradBoxes Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Float

grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

-> Tensor v2 t

image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

-> Tensor v3 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v4 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor Value Float

output: A 2-D tensor of shape `[num_boxes, 4]`.

Computes the gradient of the crop_and_resize op wrt the input boxes tensor.


batchToSpaceND

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) 
=> Tensor v'1 t

input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has M dimensions.

-> Tensor v'2 tblock_shape

block_shape: 1-D with shape `[M]`, all values must be >= 1.

-> Tensor v'3 tcrops

crops: 2-D with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input dimension `i + 1`, which corresponds to spatial dimension i. It is required that `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.


-> Tensor Build t

output

BatchToSpace for N-D tensors of type T.

This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape `block_shape + [batch]`, interleaves these blocks back into the grid defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as the input. The spatial dimensions of this intermediate result are then optionally cropped according to crops to produce the output. This is the reverse of SpaceToBatch. See below for a precise description.

spaceToBatch Source

Arguments

:: (TensorType t, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
=> Int64

block_size

-> Tensor v1 t

input: 4-D with shape `[batch, height, width, depth]`.

-> Tensor v2 tpaddings

paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows:

paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

batchToSpaceND'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tcrops) 
=> OpParams 
-> Tensor v'1 t

input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has M dimensions.

-> Tensor v'2 tblock_shape

block_shape: 1-D with shape `[M]`, all values must be >= 1.

-> Tensor v'3 tcrops

crops: 2-D with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input dimension `i + 1`, which corresponds to spatial dimension i. It is required that `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

This operation is equivalent to the following steps:

  1. Reshape input to reshaped of shape:

     [block_shape[0], ..., block_shape[M-1],
      batch / prod(block_shape),
      input_shape[1], ..., input_shape[N-1]]

  2. Permute dimensions of reshaped to produce permuted of shape:

     [batch / prod(block_shape),
      input_shape[1], block_shape[0],
      ...,
      input_shape[M], block_shape[M-1],
      input_shape[M+1], ..., input_shape[N-1]]

  3. Reshape permuted to produce reshaped_permuted of shape:

     [batch / prod(block_shape),
      input_shape[1] * block_shape[0],
      ...,
      input_shape[M] * block_shape[M-1],
      input_shape[M+1], ..., input_shape[N-1]]

  4. Crop the start and end of dimensions `[1, ..., M]` of reshaped_permuted according to crops to produce the output of shape:

     [batch / prod(block_shape),
      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
      ...,
      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
      input_shape[M+1], ..., input_shape[N-1]]

Some examples:

  1. For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

```prettyprint
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```prettyprint
x = [[[[1], [2]], [[3], [4]]]]
```

  2. For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

```prettyprint
[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

  3. For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

```prettyprint
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```prettyprint
x = [[[[1],  [2],  [3],  [4]],
      [[5],  [6],  [7],  [8]],
      [[9],  [10], [11], [12]],
      [[13], [14], [15], [16]]]]
```

  4. For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:

```prettyprint
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
     [[[0], [2], [4]]], [[[0], [10], [12]]],
     [[[0], [5], [7]]], [[[0], [13], [15]]],
     [[[0], [6], [8]]], [[[0], [14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```prettyprint
x = [[[[1], [2], [3], [4]],
      [[5], [6], [7], [8]]],
     [[[9], [10], [11], [12]],
      [[13], [14], [15], [16]]]]
```

-> Tensor Build t

output
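
As a concrete check of example (1) above, here is a minimal, untested sketch of calling batchToSpaceND through these bindings; it assumes the usual qualified imports, with `constant` coming from TensorFlow.Ops rather than this module:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

-- Example (1): a [4, 1, 1, 1] input with block_shape = [2, 2] and zero
-- crops collapses the batch of 4 into a single [1, 2, 2, 1] image.
main :: IO ()
main = do
    result <- TF.runSession $ do
        let input      = TF.constant (TF.Shape [4, 1, 1, 1]) [1, 2, 3, 4 :: Float]
            blockShape = TF.constant (TF.Shape [2]) [2, 2 :: Int32]
            crops      = TF.constant (TF.Shape [2, 2]) [0, 0, 0, 0 :: Int32]
        TF.run (CoreOps.batchToSpaceND input blockShape crops)
    -- Flattened values of the [1, 2, 2, 1] output: [1, 2, 3, 4].
    print (result :: V.Vector Float)
```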

betainc

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

a

-> Tensor v'2 t

b

-> Tensor v'3 t

x

-> Tensor Build t

z

Compute the regularized incomplete beta integral \(I_x(a, b)\).

The regularized incomplete beta integral is defined as:

```
I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}
```

where

```
B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt
```

is the incomplete beta function and \(B(a, b)\) is the *complete* beta function.
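
For a quick numeric sanity check, \(I_{0.5}(2, 3) = 11/16 = 0.6875\): the integrand \(t(1-t)^2\) integrates to \(x^2/2 - 2x^3/3 + x^4/4\), and \(B(2, 3) = 1/12\). A minimal, untested sketch of evaluating this through the bindings, assuming `scalar` from TensorFlow.Ops:

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (scalar)

-- Evaluates I_x(a, b) at a = 2, b = 3, x = 0.5; expect ~0.6875.
main :: IO ()
main = do
    z <- TF.runSession $
        TF.run (CoreOps.betainc (TF.scalar 2) (TF.scalar 3) (TF.scalar (0.5 :: Float)))
    print (z :: V.Vector Float)
```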

betainc'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

a

-> Tensor v'2 t

b

-> Tensor v'3 t

x

-> Tensor Build t

z

biasAdd

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

value: Any number of dimensions.

-> Tensor v'2 t

bias: 1-D with size the last dimension of value.

-> Tensor Build t

output: Broadcasted sum of value and bias.

Adds bias to value.

This is a special case of `tf.add` where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.
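
An untested sketch of the broadcasting behaviour, assuming `constant` from TensorFlow.Ops: a `[2, 3]` value plus a `[3]` bias adds the bias to every row.

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = do
    out <- TF.runSession $ do
        let value = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
            bias  = TF.constant (TF.Shape [3]) [10, 20, 30 :: Float]
        TF.run (CoreOps.biasAdd value bias)
    -- Row-wise broadcast, flattened: [11, 22, 33, 14, 25, 36].
    print (out :: V.Vector Float)
```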

biasAdd'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

value: Any number of dimensions.

-> Tensor v'2 t

bias: 1-D with size the last dimension of value.

-> Tensor Build t

output: Broadcasted sum of value and bias.

biasAddGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

out_backprop: Any number of dimensions.

-> Tensor Build t

output: 1-D with size the feature dimension of out_backprop.

The backward operation for BiasAdd on the "bias" tensor.

It accumulates all the values from out_backprop into the feature dimension. For NHWC data format, the feature dimension is the last. For NCHW data format, the feature dimension is the third-to-last.

biasAddGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

out_backprop: Any number of dimensions.

-> Tensor Build t

output: 1-D with size the feature dimension of out_backprop.

biasAddV1

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

value: Any number of dimensions.

-> Tensor v'2 t

bias: 1-D with size the last dimension of value.

-> Tensor Build t

output: Broadcasted sum of value and bias.

Adds bias to value.

This is a deprecated version of BiasAdd and will be soon removed.

This is a special case of `tf.add` where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.

biasAddV1'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

value: Any number of dimensions.

-> Tensor v'2 t

bias: 1-D with size the last dimension of value.

-> Tensor Build t

output: Broadcasted sum of value and bias.

bitcast

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` type') 
=> Tensor v'1 t

input

-> Tensor Build type'

output

Bitcasts a tensor from one type to another without copying data.

Given a tensor input, this operation returns a tensor that has the same buffer data as input with datatype `type`.

If the input datatype T is larger than the output datatype `type` then the shape changes from [...] to [..., sizeof(T)/sizeof(`type`)].

If T is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(T). The shape then goes from [..., sizeof(`type`)/sizeof(T)] to [...].

*NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results.
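
As a shape illustration (an untested sketch; the concrete byte values depend on endianness, as noted above), bitcasting a `[2]` Int32 tensor to Word8 multiplies the last dimension by sizeof(Int32)/sizeof(Word8) = 4:

```haskell
import Data.Int (Int32)
import Data.Word (Word8)
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

-- A [2] Int32 tensor reinterpreted as Word8 has shape [2, 4].
asBytes :: TF.Tensor TF.Build Word8
asBytes = CoreOps.bitcast (TF.constant (TF.Shape [2]) [1, 2 :: Int32])
```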

broadcastArgs

Arguments

:: OneOf `[Int32, Int64]` t 
=> Tensor v'1 t

s0

-> Tensor v'2 t

s1

-> Tensor Build t

r0

Return the shape of s0 op s1 with broadcast.

Given s0 and s1, tensors that represent shapes, compute r0, the broadcasted shape. s0, s1 and r0 are all integer vectors.

broadcastArgs'

Arguments

:: OneOf `[Int32, Int64]` t 
=> OpParams 
-> Tensor v'1 t

s0

-> Tensor v'2 t

s1

-> Tensor Build t

r0

broadcastGradientArgs

Arguments

:: OneOf `[Int32, Int64]` t 
=> Tensor v'1 t

s0

-> Tensor v'2 t

s1

-> (Tensor Build t, Tensor Build t)

(r0, r1)

  • r0
  • r1

Return the reduction indices for computing gradients of s0 op s1 with broadcast.

This is typically used by gradient computations for a broadcasting operation.
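
An untested sketch, assuming `constant` from TensorFlow.Ops: broadcasting a `[5]` tensor against a `[2, 3, 5]` tensor means gradients flowing back to the smaller input must be summed over the two leading dimensions, so r1 should come back as `[0, 1]` and r0 as empty.

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = do
    (r0, r1) <- TF.runSession $ do
        let s0 = TF.constant (TF.Shape [3]) [2, 3, 5 :: Int32]
            s1 = TF.constant (TF.Shape [1]) [5 :: Int32]
        TF.run (CoreOps.broadcastGradientArgs s0 s1)
    print (r0 :: V.Vector Int32)  -- expected: []
    print (r1 :: V.Vector Int32)  -- expected: [0, 1]
```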

broadcastGradientArgs'

Arguments

:: OneOf `[Int32, Int64]` t 
=> OpParams 
-> Tensor v'1 t

s0

-> Tensor v'2 t

s1

-> (Tensor Build t, Tensor Build t)

(r0, r1)

  • r0
  • r1

cTCBeamSearchDecoder

Arguments

:: Int64

beam_width: A scalar >= 0 (beam search beam width).

-> Int64

top_paths: A scalar >= 0, <= beam_width (controls output size).

-> Tensor v'1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v'2 Int32

sequence_length: A vector containing sequence lengths, size `(batch)`.

-> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)

(decoded_indices, decoded_values, decoded_shape, log_probability)

  • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, size `(total_decoded_outputs[j] x 2)`, has indices of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
  • decoded_values: A list (length: top_paths) of values vectors. Vector j, size `(length total_decoded_outputs[j])`, has the values of a `SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.
  • decoded_shape: A list (length: top_paths) of shape vectors. Vector j, size `(2)`, stores the shape of the decoded `SparseTensor[j]`. Its values are: `[batch_size, max_decoded_length[j]]`.
  • log_probability: A matrix, shaped: `(batch_size x top_paths)`. The sequence log-probabilities.

Performs beam search decoding on the logits given in input.

A note about the attribute merge_repeated: For the beam search decoder, this means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the top path is "A B B B B", "A B" is returned if merge_repeated = True but "A B B B B" is returned if merge_repeated = False.

cTCBeamSearchDecoder'

Arguments

:: OpParams 
-> Int64

beam_width: A scalar >= 0 (beam search beam width).

-> Int64

top_paths: A scalar >= 0, <= beam_width (controls output size).

-> Tensor v'1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v'2 Int32

sequence_length: A vector containing sequence lengths, size `(batch)`.

-> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float)

(decoded_indices, decoded_values, decoded_shape, log_probability)

  • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, size `(total_decoded_outputs[j] x 2)`, has indices of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
  • decoded_values: A list (length: top_paths) of values vectors. Vector j, size `(length total_decoded_outputs[j])`, has the values of a `SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.
  • decoded_shape: A list (length: top_paths) of shape vectors. Vector j, size `(2)`, stores the shape of the decoded `SparseTensor[j]`. Its values are: `[batch_size, max_decoded_length[j]]`.
  • log_probability: A matrix, shaped: `(batch_size x top_paths)`. The sequence log-probabilities.

cTCGreedyDecoder

Arguments

:: Tensor v'1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v'2 Int32

sequence_length: A vector containing sequence lengths, size `(batch_size)`.

-> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)

(decoded_indices, decoded_values, decoded_shape, log_probability)

  • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
  • decoded_values: Values vector, size: `(total_decoded_outputs)`, of a `SparseTensor<int64, 2>`. The vector stores the decoded classes.
  • decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. Values are: `[batch_size, max_decoded_length]`.
  • log_probability: Matrix, size `(batch_size x 1)`, containing sequence log-probabilities.

Performs greedy decoding on the logits given in inputs.

A note about the attribute merge_repeated: if enabled, when consecutive logits' maximum indices are the same, only the first of these is emitted. Labeling the blank *, the sequence "A B B * B B" becomes "A B" if merge_repeated = True and "A B B B B" if merge_repeated = False.

Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank, index `(num_classes - 1)`, no new element is emitted.

cTCGreedyDecoder'

Arguments

:: OpParams 
-> Tensor v'1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v'2 Int32

sequence_length: A vector containing sequence lengths, size `(batch_size)`.

-> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float)

(decoded_indices, decoded_values, decoded_shape, log_probability)

  • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
  • decoded_values: Values vector, size: `(total_decoded_outputs)`, of a `SparseTensor<int64, 2>`. The vector stores the decoded classes.
  • decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. Values are: `[batch_size, max_decoded_length]`.
  • log_probability: Matrix, size `(batch_size x 1)`, containing sequence log-probabilities.

cTCLoss

Arguments

:: Tensor v'1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v'2 Int64

labels_indices: The indices of a `SparseTensor<int32, 2>`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`.

-> Tensor v'3 Int32

labels_values: The values (labels) associated with the given batch and time.

-> Tensor v'4 Int32

sequence_length: A vector containing sequence lengths (batch).

-> (Tensor Build Float, Tensor Build Float)

(loss, gradient)

  • loss: A vector (batch) containing log-probabilities.
  • gradient: The gradient of loss. 3-D, shape: `(max_time x batch_size x num_classes)`.

Calculates the CTC Loss (log probability) for each batch entry. Also calculates the gradient. This class performs the softmax operation for you, so inputs should be e.g. linear projections of outputs by an LSTM.

cTCLoss'

Arguments

:: OpParams 
-> Tensor v'1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v'2 Int64

labels_indices: The indices of a `SparseTensor<int32, 2>`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`.

-> Tensor v'3 Int32

labels_values: The values (labels) associated with the given batch and time.

-> Tensor v'4 Int32

sequence_length: A vector containing sequence lengths (batch).

-> (Tensor Build Float, Tensor Build Float)

(loss, gradient)

  • loss: A vector (batch) containing log-probabilities.
  • gradient: The gradient of loss. 3-D, shape: `(max_time x batch_size x num_classes)`.

cast

Arguments

:: (TensorType srcT, TensorType dstT) 
=> Tensor v'1 srcT

x

-> Tensor Build dstT

y

Cast x of type SrcT to y of DstT.
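
A minimal, untested sketch, assuming `constant` from TensorFlow.Ops; the destination type is picked by the type annotation on the result:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = do
    y <- TF.runSession $
        TF.run (CoreOps.cast (TF.constant (TF.Shape [3]) [1, 2, 3 :: Int32])
                    :: TF.Tensor TF.Build Float)
    print (y :: V.Vector Float)  -- [1.0, 2.0, 3.0]
```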

cast'

Arguments

:: (TensorType srcT, TensorType dstT) 
=> OpParams 
-> Tensor v'1 srcT

x

-> Tensor Build dstT

y

ceil

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Returns element-wise smallest integer not less than x.

ceil'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

checkNumerics

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

tensor

-> Tensor Build t

output

Checks a tensor for NaN and Inf values.

When run, reports an InvalidArgument error if tensor has any values that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.

checkNumerics'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

tensor

-> Tensor Build t

output

cholesky

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[..., M, M]`.

Computes the Cholesky decomposition of one or more square matrices.

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix Cholesky decomposition above. The output is a tensor of the same shape as the input containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
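
A small, untested numeric check, assuming `constant` from TensorFlow.Ops: the Cholesky factor of the diagonal matrix diag(4, 9) is diag(2, 3), since L L^T must reproduce the input.

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = do
    l <- TF.runSession $
        TF.run (CoreOps.cholesky
                    (TF.constant (TF.Shape [2, 2]) [4, 0, 0, 9 :: Float]))
    -- Lower-triangular factor, flattened: [2, 0, 0, 3].
    print (l :: V.Vector Float)
```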

cholesky'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[..., M, M]`.

choleskyGrad

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

-> Tensor v'2 t

grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

-> Tensor Build t

output: Symmetrized version of df/dA. Shape is `[..., M, M]`.

Computes the reverse mode backpropagated gradient of the Cholesky algorithm.

For an explanation see "Differentiation of the Cholesky algorithm" by Iain Murray, http://arxiv.org/abs/1602.07527.

choleskyGrad'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

-> Tensor v'2 t

grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

-> Tensor Build t

output: Symmetrized version of df/dA. Shape is `[..., M, M]`.

complex

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Complex Double, Complex Float]` tout) 
=> Tensor v'1 t

real

-> Tensor v'2 t

imag

-> Tensor Build tout

out

Converts two real numbers to a complex number.

Given a tensor real representing the real part of a complex number, and a tensor imag representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \(a + bj\), where *a* represents the real part and *b* represents the imag part.

The input tensors real and imag must have the same shape.

For example:

```
# tensor real is [2.25, 3.25]
# tensor imag is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```

complex'

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Complex Double, Complex Float]` tout) 
=> OpParams 
-> Tensor v'1 t

real

-> Tensor v'2 t

imag

-> Tensor Build tout

out

complexAbs

Arguments

:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
=> Tensor v'1 t

x

-> Tensor Build tout

y

Computes the complex absolute value of a tensor.

Given a tensor x of complex numbers, this operation returns a tensor of type float or double that is the absolute value of each element in x. All elements in x must be complex numbers of the form \(a + bj\). The absolute value is computed as \(\sqrt{a^2 + b^2}\).

complexAbs'

Arguments

:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build tout

y

computeAccidentalHits

Arguments

:: Int64

num_true: Number of true labels per context.

-> Tensor v'1 Int64

true_classes: The true_classes output of UnpackSparseLabels.

-> Tensor v'2 Int64

sampled_candidates: The sampled_candidates output of CandidateSampler.

-> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)

(indices, ids, weights)

  • indices: A vector of indices corresponding to rows of true_candidates.
  • ids: A vector of IDs of positions in sampled_candidates that match a true_label for the row with the corresponding index in indices.
  • weights: A vector of the same length as indices and ids, in which each element is -FLOAT_MAX.

Computes the ids of the positions in sampled_candidates that match true_labels.

When doing log-odds NCE, the result of this op should be passed through a SparseToDense op, then added to the logits of the sampled candidates. This has the effect of removing the sampled labels that match the true labels by making the classifier sure that they are sampled labels.

computeAccidentalHits'

Arguments

:: OpParams 
-> Int64

num_true: Number of true labels per context.

-> Tensor v'1 Int64

true_classes: The true_classes output of UnpackSparseLabels.

-> Tensor v'2 Int64

sampled_candidates: The sampled_candidates output of CandidateSampler.

-> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float)

(indices, ids, weights)

  • indices: A vector of indices corresponding to rows of true_candidates.
  • ids: A vector of IDs of positions in sampled_candidates that match a true_label for the row with the corresponding index in indices.
  • weights: A vector of the same length as indices and ids, in which each element is -FLOAT_MAX.

concat

Arguments

:: TensorType t 
=> Tensor v'1 Int32

concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)).

-> [Tensor v'2 t]

values: The N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.

-> Tensor Build t

output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except in concat_dim where it has the sum of the sizes.

Concatenates tensors along one dimension.
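
An untested sketch, assuming `constant` and `scalar` from TensorFlow.Ops: concatenating two `[2]` vectors along dimension 0 yields a `[4]` vector.

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant, scalar)

main :: IO ()
main = do
    out <- TF.runSession $ do
        let xs = [ TF.constant (TF.Shape [2]) [1, 2 :: Float]
                 , TF.constant (TF.Shape [2]) [3, 4 :: Float] ]
        TF.run (CoreOps.concat (TF.scalar (0 :: Int32)) xs)
    print (out :: V.Vector Float)  -- [1, 2, 3, 4]
```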

concat'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int32

concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)).

-> [Tensor v'2 t]

values: The N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.

-> Tensor Build t

output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except in concat_dim where it has the sum of the sizes.

concatOffset

Arguments

:: Tensor v'1 Int32

concat_dim: The dimension along which to concatenate.

-> [Tensor v'2 Int32]

shape: The N int32 vectors representing shape of tensors being concatenated.

-> [Tensor Build Int32]

offset: The N int32 vectors representing the starting offset of input tensors within the concatenated output.

This is typically used by gradient computations for a concat operation.

Computes offsets of concat inputs within its output.

For example:

```prettyprint
# x is [2, 2, 7]
# y is [2, 3, 7]
# z is [2, 5, 7]
concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
```

concatOffset'

Arguments

:: OpParams 
-> Tensor v'1 Int32

concat_dim: The dimension along which to concatenate.

-> [Tensor v'2 Int32]

shape: The N int32 vectors representing shape of tensors being concatenated.

-> [Tensor Build Int32]

offset: The N int32 vectors representing the starting offset of input tensors within the concatenated output.

This is typically used by gradient computations for a concat operation.

concatV2

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tidx) 
=> [Tensor v'1 t]

values: List of N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.

-> Tensor v'2 tidx

axis: 0-D. The dimension along which to concatenate. Must be in the range [-rank(values), rank(values)).

-> Tensor Build t

output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except in concat_dim where it has the sum of the sizes.

Concatenates tensors along one dimension.

concatV2'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> [Tensor v'1 t]

values: List of N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.

-> Tensor v'2 tidx

axis: 0-D. The dimension along which to concatenate. Must be in the range [-rank(values), rank(values)).

-> Tensor Build t

output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except in concat_dim where it has the sum of the sizes.

conditionalAccumulator

Arguments

:: MonadBuild m' 
=> DataType

dtype: The type of the value being accumulated.

-> Shape

shape: The shape of the values, can be [], in which case shape is unknown.

-> m' (Tensor Ref ByteString)

handle: The handle to the accumulator.

A conditional accumulator for aggregating gradients. The accumulator accepts gradients marked with local_step greater or equal to the most recent global_step known to the accumulator. The average can be extracted from the accumulator, provided sufficient gradients have been accumulated. Extracting the average automatically resets the aggregate to 0, and increments the global_step recorded by the accumulator.

conditionalAccumulator'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

dtype: The type of the value being accumulated.

-> Shape

shape: The shape of the values, can be [], in which case shape is unknown.

-> m' (Tensor Ref ByteString)

handle: The handle to the accumulator.

conj

Arguments

:: OneOf `[Complex Double, Complex Float]` t 
=> Tensor v'1 t

input

-> Tensor Build t

output

Returns the complex conjugate of a complex number.

Given a tensor input of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in input. The complex numbers in input must be of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part.

The complex conjugate returned by this operation is of the form \(a - bj\).

For example:

```
# tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
```

conj'

Arguments

:: OneOf `[Complex Double, Complex Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

const

Arguments

:: TensorType dtype 
=> Tensor Build dtype

output

Returns a constant tensor.

const'

Arguments

:: TensorType dtype 
=> OpParams 
-> Tensor Build dtype

output

controlTrigger :: forall m'. MonadBuild m' => m' ControlNode

Does nothing. Serves as a control trigger for scheduling.

Only useful as a placeholder for control edges.

conv2D

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

input

-> Tensor v'2 t

filter

-> Tensor Build t

output

Computes a 2-D convolution given 4-D input and filter tensors.

Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following:

  1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch vector.

In detail, with the default NHWC format,

```
output[b, i, j, k] =
    sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                    filter[di, dj, q, k]
```

Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
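
Since strides and padding are attributes rather than inputs, they go through the OpParams argument of conv2D' in these bindings. An untested sketch, assuming `opAttr` (from TensorFlow.Build) and a lens-family setter are in scope: under "VALID" padding the output spatial size is (in - filter) / stride + 1 per dimension, so a [1, 4, 4, 1] input with a [3, 3, 1, 8] filter gives a [1, 2, 2, 8] output.

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Data.ByteString (ByteString)
import Data.Int (Int64)
import Lens.Family2 ((.~))
import qualified TensorFlow.Build as TF (opAttr)
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps

-- (4 - 3) `div` 1 + 1 = 2 in each spatial dimension: output [1, 2, 2, 8].
conv :: TF.Tensor v1 Float -> TF.Tensor v2 Float -> TF.Tensor TF.Build Float
conv input filter' =
    CoreOps.conv2D'
        ( (TF.opAttr "strides" .~ [1, 1, 1, 1 :: Int64])
        . (TF.opAttr "padding" .~ ("VALID" :: ByteString)) )
        input filter'
```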

conv2D'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 t

filter

-> Tensor Build t

output

conv2DBackpropFilter

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the filter input of the convolution.

Computes the gradients of convolution with respect to the filter.

conv2DBackpropFilter'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the filter input of the convolution.

conv2DBackpropInput

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 Int32

input_sizes: An integer vector representing the shape of input, where input is a 4-D `[batch, height, width, channels]` tensor.

-> Tensor v'2 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.

Computes the gradients of convolution with respect to the input.

conv2DBackpropInput'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int32

input_sizes: An integer vector representing the shape of input, where input is a 4-D `[batch, height, width, channels]` tensor.

-> Tensor v'2 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.

conv3D

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

-> Tensor v'2 t

filter: Shape `[filter_depth, filter_height, filter_width, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor Build t

output

Computes a 3-D convolution given 5-D input and filter tensors.

In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product.

Our Conv3D implements a form of cross-correlation.

conv3D'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.

-> Tensor v'2 t

filter: Shape `[filter_depth, filter_height, filter_width, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor Build t

output

conv3DBackpropFilter

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v'2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

Computes the gradients of 3-D convolution with respect to the filter.

conv3DBackpropFilter'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v'2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

conv3DBackpropFilterV2

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v'2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` tensor.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

Computes the gradients of 3-D convolution with respect to the filter.

conv3DBackpropFilterV2'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v'2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, where filter is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` tensor.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

conv3DBackpropInput

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v'2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

Computes the gradients of 3-D convolution with respect to the input.

conv3DBackpropInput'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v'2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

conv3DBackpropInputV2

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int32

input_sizes: An integer vector representing the tensor shape of input, where input is a 5-D `[batch, depth, rows, cols, in_channels]` tensor.

-> Tensor v'2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

Computes the gradients of 3-D convolution with respect to the input.

conv3DBackpropInputV2'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int32

input_sizes: An integer vector representing the tensor shape of input, where input is a 5-D `[batch, depth, rows, cols, in_channels]` tensor.

-> Tensor v'2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v'3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Build t

output

copy

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Input tensor.

-> Tensor Build t

output: Output tensor, deep-copied from input.

Copy Op.

Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated.

Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output.

copy'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Input tensor.

-> Tensor Build t

output: Output tensor, deep-copied from input.

copyHost

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Input tensor.

-> Tensor Build t

output: Output tensor, deep-copied from input.

Copy Host Op.

Performs CPU-to-CPU deep-copying of tensor.

Unlike the Copy Op, this op has HostMemory constraint on its input or output.

copyHost'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Input tensor.

-> Tensor Build t

output: Output tensor, deep-copied from input.

cos

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes cos of x element-wise.

cos'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

countUpTo

Arguments

:: (MonadBuild m', OneOf `[Int32, Int64]` t) 
=> Int64

limit: If incrementing ref would bring it above limit, instead generates an OutOfRange error.

-> Tensor Ref t

ref: Should be from a scalar Variable node.

-> m' (Tensor Value t)

output: A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct.

Increments ref until it reaches limit.

countUpTo'

Arguments

:: (MonadBuild m', OneOf `[Int32, Int64]` t) 
=> OpParams 
-> Int64

limit: If incrementing ref would bring it above limit, instead generates an OutOfRange error.

-> Tensor Ref t

ref: Should be from a scalar Variable node.

-> m' (Tensor Value t)

output: A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct.

cropAndResize

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

-> Tensor v'2 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v'3 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor v'4 Int32

crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.

-> Tensor Build Float

crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

Extracts crops from the input image tensor and bilinearly resizes them (possibly with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change.

cropAndResize'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

-> Tensor v'2 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v'3 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor v'4 Int32

crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.

-> Tensor Build Float

crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

cropAndResizeGradBoxes

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Float

grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

-> Tensor v'2 t

image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

-> Tensor v'3 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v'4 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor Build Float

output: A 2-D tensor of shape `[num_boxes, 4]`.

Computes the gradient of the crop_and_resize op wrt the input boxes tensor.

cropAndResizeGradBoxes'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Float

grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

-> Tensor v'2 t

image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

-> Tensor v'3 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v'4 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor Build Float

output: A 2-D tensor of shape `[num_boxes, 4]`.

cropAndResizeGradImage

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 Float

grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

-> Tensor v'2 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v'3 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor v'4 Int32

image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` containing the original image size. Both image_height and image_width need to be positive.

-> Tensor Build t

output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

Computes the gradient of the crop_and_resize op wrt the input image tensor.

cropAndResizeGradImage'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Float

grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

-> Tensor v'2 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v'3 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor v'4 Int32

image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` containing the original image size. Both image_height and image_width need to be positive.

-> Tensor Build t

output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

cross

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

a: A tensor containing 3-element vectors.

-> Tensor v'2 t

b: Another tensor, of same type and shape as a.

-> Tensor Build t

product: Pairwise cross product of the vectors in a and b.

Compute the pairwise cross product.

a and b must be the same shape; they can either be simple 3-element vectors, or any shape where the innermost dimension is 3. In the latter case, each pair of corresponding 3-element vectors is cross-multiplied independently.
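
An untested sketch, assuming `constant` from TensorFlow.Ops: the cross product of the x and y unit vectors is the z unit vector.

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = do
    p <- TF.runSession $ do
        let a = TF.constant (TF.Shape [3]) [1, 0, 0 :: Float]
            b = TF.constant (TF.Shape [3]) [0, 1, 0 :: Float]
        TF.run (CoreOps.cross a b)
    print (p :: V.Vector Float)  -- [0, 0, 1]
```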

cross'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

a: A tensor containing 3-element vectors.

-> Tensor v'2 t

b: Another tensor, of same type and shape as a.

-> Tensor Build t

product: Pairwise cross product of the vectors in a and b.

cumprod

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

x

-> Tensor v'2 tidx

axis

-> Tensor Build t

out

Compute the cumulative product of the tensor x along axis.

By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output:

```prettyprint
tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
```

By setting the exclusive kwarg to True, an exclusive cumprod is performed instead:

```prettyprint
tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
```

By setting the reverse kwarg to True, the cumprod is performed in the opposite direction:

```prettyprint
tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
```

This is more efficient than using separate `tf.reverse` ops.

The reverse and exclusive kwargs can also be combined:

```prettyprint
tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
```

cumprod'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 tidx

axis

-> Tensor Build t

out

cumsum

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

x

-> Tensor v'2 tidx

axis

-> Tensor Build t

out

Compute the cumulative sum of the tensor x along axis.

By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output:

```prettyprint
tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
```

By setting the exclusive kwarg to True, an exclusive cumsum is performed instead:

```prettyprint
tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
```

By setting the reverse kwarg to True, the cumsum is performed in the opposite direction:

```prettyprint
tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
```

This is more efficient than using separate `tf.reverse` ops.

The reverse and exclusive kwargs can also be combined:

```prettyprint
tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
```

cumsum'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 tidx

axis

-> Tensor Build t

out

debugIdentity

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Input tensor, non-Reference type.

-> Tensor Build t

output: Output tensor that equals the input tensor.

Debug Identity Op.

Provides an identity mapping of the non-Ref type input tensor for debugging.

debugIdentity'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Input tensor, non-Reference type.

-> Tensor Build t

output: Output tensor that equals the input tensor.

debugNanCount

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Input tensor, non-Reference type.

-> Tensor Build Int64

output: An integer output tensor that is the number of NaNs in the input.

Debug NaN Value Counter Op

Counts number of NaNs in the input tensor, for debugging.

debugNanCount'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Input tensor, non-Reference type.

-> Tensor Build Int64

output: An integer output tensor that is the number of NaNs in the input.

debugNumericSummary

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Input tensor, non-Reference type, float or double.

-> Tensor Build Double

output: A double tensor of shape [12], the elements of which are:
  [0]: is initialized (1.0) or not (0.0).
  [1]: total number of elements
  [2]: -inf count
  [3]: negative element count (excluding -inf)
  [4]: zero element count
  [5]: positive element count (excluding +inf)
  [6]: +inf element count
  [7]: NaN element count
  Output elements [1:8] are all zero, if the tensor is uninitialized.
  [8]: minimum of all non-inf and non-NaN elements. If uninitialized or no such element exists: +inf.
  [9]: maximum of all non-inf and non-NaN elements. If uninitialized or no such element exists: -inf.
  [10]: mean of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN.
  [11]: variance of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN.

Debug Numeric Summary Op.

Provide a basic summary of numeric value types, range and distribution.

debugNumericSummary'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Input tensor, non-Reference type, float or double.

-> Tensor Build Double

output: A double tensor of shape [12], the elements of which are:
  [0]: is initialized (1.0) or not (0.0).
  [1]: total number of elements
  [2]: -inf count
  [3]: negative element count (excluding -inf)
  [4]: zero element count
  [5]: positive element count (excluding +inf)
  [6]: +inf element count
  [7]: NaN element count
  Output elements [1:8] are all zero, if the tensor is uninitialized.
  [8]: minimum of all non-inf and non-NaN elements. If uninitialized or no such element exists: +inf.
  [9]: maximum of all non-inf and non-NaN elements. If uninitialized or no such element exists: -inf.
  [10]: mean of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN.
  [11]: variance of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN.

decodeBase64

Arguments

:: Tensor v'1 ByteString

input: Base64 strings to decode.

-> Tensor Build ByteString

output: Decoded strings.

Decode web-safe base64-encoded strings.

Input may or may not have padding at the end. See EncodeBase64 for padding. Web-safe means that input must use - and _ instead of + and /.

decodeBase64'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

input: Base64 strings to decode.

-> Tensor Build ByteString

output: Decoded strings.

decodeCSV

Arguments

:: OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE 
=> Tensor v'1 ByteString

records: Each string is a record/row in the csv and all records should have the same format.

-> TensorList v'2 oUT_TYPE

record_defaults: One tensor per column of the input record, with either a scalar default value for that column or empty if the column is required.

-> TensorList Build oUT_TYPE

output: Each tensor will have the same shape as records.

Convert CSV records to tensors. Each column maps to one tensor.

RFC 4180 format is expected for the CSV records (https://tools.ietf.org/html/rfc4180). Note that we allow leading and trailing spaces with int or float fields.

decodeCSV'

Arguments

:: OneOfs `[ByteString, Int32, Int64, Float]` oUT_TYPE 
=> OpParams 
-> Tensor v'1 ByteString

records: Each string is a record/row in the csv and all records should have the same format.

-> TensorList v'2 oUT_TYPE

record_defaults: One tensor per column of the input record, with either a scalar default value for that column or empty if the column is required.

-> TensorList Build oUT_TYPE

output: Each tensor will have the same shape as records.

decodeGif

Arguments

:: Tensor v'1 ByteString

contents: 0-D. The GIF-encoded image.

-> Tensor Build Word8

image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

Decode the first frame of a GIF-encoded image to a uint8 tensor.

GIFs with frame or transparency compression are not supported; convert an animated GIF from compressed to uncompressed by:

convert $src.gif -coalesce $dst.gif

decodeGif'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

contents: 0-D. The GIF-encoded image.

-> Tensor Build Word8

image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

decodeJSONExample

Arguments

:: Tensor v'1 ByteString

json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto.

-> Tensor Build ByteString

binary_examples: Each string is a binary Example protocol buffer corresponding to the respective element of json_examples.

Convert JSON-encoded Example records to binary protocol buffer strings.

This op translates a tensor containing Example records, encoded using the standard JSON mapping, into a tensor containing the same records encoded as binary protocol buffers. The resulting tensor can then be fed to any of the other Example-parsing ops.

decodeJSONExample'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto.

-> Tensor Build ByteString

binary_examples: Each string is a binary Example protocol buffer corresponding to the respective element of json_examples.

decodeJpeg

Arguments

:: Tensor v'1 ByteString

contents: 0-D. The JPEG-encoded image.

-> Tensor Build Word8

image: 3-D with shape `[height, width, channels]`.

Decode a JPEG-encoded image to a uint8 tensor.

The attr channels indicates the desired number of color channels for the decoded image.

Accepted values are:

  • 0: Use the number of channels in the JPEG-encoded image.
  • 1: output a grayscale image.
  • 3: output an RGB image.

If needed, the JPEG-encoded image is transformed to match the requested number of color channels.

The attr ratio allows downscaling the image by an integer factor during decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than downscaling the image later.

decodeJpeg'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

contents: 0-D. The JPEG-encoded image.

-> Tensor Build Word8

image: 3-D with shape `[height, width, channels]`.

decodePng

Arguments

:: OneOf `[Word16, Word8]` dtype 
=> Tensor v'1 ByteString

contents: 0-D. The PNG-encoded image.

-> Tensor Build dtype

image: 3-D with shape `[height, width, channels]`.

Decode a PNG-encoded image to a uint8 or uint16 tensor.

The attr channels indicates the desired number of color channels for the decoded image.

Accepted values are:

  • 0: Use the number of channels in the PNG-encoded image.
  • 1: output a grayscale image.
  • 3: output an RGB image.
  • 4: output an RGBA image.

If needed, the PNG-encoded image is transformed to match the requested number of color channels.

decodePng'

Arguments

:: OneOf `[Word16, Word8]` dtype 
=> OpParams 
-> Tensor v'1 ByteString

contents: 0-D. The PNG-encoded image.

-> Tensor Build dtype

image: 3-D with shape `[height, width, channels]`.

decodeRaw

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type 
=> Tensor v'1 ByteString

bytes: All the elements must have the same length.

-> Tensor Build out_type

output: A Tensor with one more dimension than the input bytes. The added dimension will have size equal to the length of the elements of bytes divided by the number of bytes to represent out_type.

Reinterpret the bytes of a string as a vector of numbers.
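For instance, reinterpreting an 8-byte string as two little-endian Int32 values might look like the following sketch (untested; same qualified imports as the decodeJpeg' sketch above, with the result type pinning out_type):

```haskell
decodedInts :: TF.Tensor TF.Build Int32
decodedInts =
    TFC.decodeRaw (TFO.scalar ("\x01\x00\x00\x00\x02\x00\x00\x00" :: BS.ByteString))
-- With little_endian at its default of true, the scalar input yields
-- a vector of 8 / 4 = 2 elements: [1, 2].
```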

decodeRaw'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type 
=> OpParams 
-> Tensor v'1 ByteString

bytes: All the elements must have the same length.

-> Tensor Build out_type

output: A Tensor with one more dimension than the input bytes. The added dimension will have size equal to the length of the elements of bytes divided by the number of bytes to represent out_type.

deleteSessionTensor

Arguments

:: MonadBuild m' 
=> Tensor v'1 ByteString

handle: The handle for a tensor stored in the session state.

-> m' ControlNode 

Delete the tensor specified by its handle in the session.

deleteSessionTensor'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor v'1 ByteString

handle: The handle for a tensor stored in the session state.

-> m' ControlNode 

denseToDenseSetOperation

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> Tensor v'1 t

set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

-> Tensor v'2 t

set2: Tensor with rank n. 1st `n-1` dimensions must be the same as set1. + Dimension n contains values in a set, duplicates are allowed but ignored.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(result_indices, result_values, result_shape)

  • result_indices: 2D indices of a SparseTensor.
  • result_values: 1D values of a SparseTensor.
  • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` + is the max result set size across all `0...n-1` dimensions.

Applies set operation along last dimension of 2 Tensor inputs.

See SetOperationOp::SetOperationFromContext for values of set_operation.

Output result is a SparseTensor represented by result_indices, result_values, and result_shape. For set1 and set2 ranked n, this has rank n and the same 1st `n-1` dimensions as set1 and set2. The nth dimension contains the result of set_operation applied to the corresponding `[0...n-1]` dimension of set.

denseToDenseSetOperation'

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 t

set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

-> Tensor v'2 t

set2: Tensor with rank n. 1st `n-1` dimensions must be the same as set1. + Dimension n contains values in a set, duplicates are allowed but ignored.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(result_indices, result_values, result_shape)

  • result_indices: 2D indices of a SparseTensor.
  • result_values: 1D values of a SparseTensor.
  • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` + is the max result set size across all `0...n-1` dimensions.

denseToSparseSetOperation

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> Tensor v'1 t

set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

-> Tensor v'2 Int64

set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

-> Tensor v'3 t

set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

-> Tensor v'4 Int64

set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must + be the same as the 1st `n-1` dimensions of set1, `result_shape[n]` is the + max set size across `n-1` dimensions.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(result_indices, result_values, result_shape)

  • result_indices: 2D indices of a SparseTensor.
  • result_values: 1D values of a SparseTensor.
  • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` + is the max result set size across all `0...n-1` dimensions.

Applies set operation along last dimension of Tensor and SparseTensor.

See SetOperationOp::SetOperationFromContext for values of set_operation.

Input set2 is a SparseTensor represented by set2_indices, set2_values, and set2_shape. For set2 ranked n, 1st `n-1` dimensions must be the same as set1. Dimension n contains values in a set, duplicates are allowed but ignored.

If validate_indices is True, this op validates the order and range of set2 indices.

Output result is a SparseTensor represented by result_indices, result_values, and result_shape. For set1 and set2 ranked n, this has rank n and the same 1st `n-1` dimensions as set1 and set2. The nth dimension contains the result of set_operation applied to the corresponding `[0...n-1]` dimension of set.

denseToSparseSetOperation'

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 t

set1: Tensor with rank n. 1st `n-1` dimensions must be the same as set2. + Dimension n contains values in a set, duplicates are allowed but ignored.

-> Tensor v'2 Int64

set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major + order.

-> Tensor v'3 t

set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major + order.

-> Tensor v'4 Int64

set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must + be the same as the 1st `n-1` dimensions of set1, `result_shape[n]` is the + max set size across `n-1` dimensions.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(result_indices, result_values, result_shape)

  • result_indices: 2D indices of a SparseTensor.
  • result_values: 1D values of a SparseTensor.
  • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is + the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` + is the max result set size across all `0...n-1` dimensions.

depthToSpace

Arguments

:: TensorType t 
=> Int64

block_size: The size of the spatial block, same as in Space2Depth.

-> Tensor v'1 t

input

-> Tensor Build t

output

DepthToSpace for tensors of type T.

Rearranges data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. The attr block_size indicates the input block size and how the data is moved.

  • Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size`
  • The width of the output tensor is `input_width * block_size`, whereas the height is `input_height * block_size`.
  • The depth of the input tensor must be divisible by `block_size * block_size`.

That is, assuming the input is in the shape: `[batch, height, width, depth]`, the shape of the output will be: `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`

This operation requires that the input tensor be of rank 4, and that block_size be >= 1 and that `block_size * block_size` be a divisor of the input depth.

This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.

For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:

```prettyprint
x = [[[[1, 2, 3, 4]]]]
```

This operation will output a tensor of shape `[1, 2, 2, 1]`:

```prettyprint
[[[[1], [2]],
  [[3], [4]]]]
```

Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, the corresponding output will have 2x2 elements and will have a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output element shape is `[2, 2, 1]`.

For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

```prettyprint
x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```

This operation, for block size of 2, will return the following tensor of shape + `[1, 2, 2, 3]`

```prettyprint
[[[[1, 2, 3], [4, 5, 6]],
  [[7, 8, 9], [10, 11, 12]]]]
```

Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

```prettyprint
x = [[[[1, 2, 3, 4],
       [5, 6, 7, 8]],
      [[9, 10, 11, 12],
       [13, 14, 15, 16]]]]
```

the operator will return the following tensor of shape `[1 4 4 1]`:

```prettyprint
x = [[ [1],  [2],  [5],  [6]],
     [ [3],  [4],  [7],  [8]],
     [ [9], [10], [13], [14]],
     [ [11], [12], [15], [16]]]
```
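The first example above, written against this binding as an untested sketch (same qualified imports as earlier):

```haskell
toSpatial :: TF.Tensor TF.Build Float
toSpatial = TFC.depthToSpace 2 input  -- block_size = 2
  where
    -- Shape [1, 1, 1, 4], matching the first example.
    input = TFO.constant (TF.Shape [1, 1, 1, 4]) [1, 2, 3, 4 :: Float]
-- Evaluates to the [1, 2, 2, 1] tensor [[[[1], [2]], [[3], [4]]]].
```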

depthToSpace'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

block_size: The size of the spatial block, same as in Space2Depth.

-> Tensor v'1 t

input

-> Tensor Build t

output

depthwiseConv2dNative

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input

-> Tensor v'2 t

filter

-> Tensor Build t

output

Computes a 2-D depthwise convolution given 4-D input and filter tensors.

Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing in_channels convolutional filters of depth 1, depthwise_conv2d applies a different filter to each input channel (expanding from 1 channel to channel_multiplier channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels.

for k in 0..in_channels-1
  for q in 0..channel_multiplier-1
    output[b, i, j, k * channel_multiplier + q] =
      sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                   filter[di, dj, k, q]

Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

depthwiseConv2dNative'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 t

filter

-> Tensor Build t

output

depthwiseConv2dNativeBackpropFilter

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 4-D + `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. + the filter input of the convolution.

Computes the gradients of depthwise convolution with respect to the filter.

depthwiseConv2dNativeBackpropFilter'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

filter_sizes: An integer vector representing the tensor shape of filter, + where filter is a 4-D + `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. + the filter input of the convolution.

depthwiseConv2dNativeBackpropInput

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 Int32

input_sizes: An integer vector representing the shape of input, + where input is a 4-D `[batch, height, width, channels]` tensor.

-> Tensor v'2 t

filter: 4-D with shape + `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient + w.r.t. the input of the convolution.

Computes the gradients of depthwise convolution with respect to the input.

depthwiseConv2dNativeBackpropInput'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int32

input_sizes: An integer vector representing the shape of input, + where input is a 4-D `[batch, height, width, channels]` tensor.

-> Tensor v'2 t

filter: 4-D with shape + `[filter_height, filter_width, in_channels, depthwise_multiplier]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + Gradients w.r.t. the output of the convolution.

-> Tensor Build t

output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient + w.r.t. the input of the convolution.

dequantize

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> Tensor v'1 t

input

-> Tensor v'2 Float

min_range: The minimum scalar value possibly produced for the input.

-> Tensor v'3 Float

max_range: The maximum scalar value possibly produced for the input.

-> Tensor Build Float

output

Dequantize the input tensor into a float Tensor.

min_range, max_range are scalar floats that specify the range for the input data. The mode attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.

In MIN_COMBINED mode, each value of the tensor will undergo the following:

```
if T == qint8, in[i] += (range(T) + 1) / 2.0
out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

*MIN_COMBINED Mode Example*

If the input comes from a QuantizedRelu6, the output type is quint8 (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The min_range and max_range values are therefore 0.0 and 6.0. Dequantize on quint8 will take each value, cast to float, and multiply by 6 / 255. Note that if the quantized type is qint8, the operation will additionally add 128 to each value prior to casting.

If the mode is MIN_FIRST, then this approach is used:

```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = range / number_of_steps
const double offset_input = static_cast<double>(input) - lowest_quantized;
result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
```
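Continuing the QuantizedRelu6 example above, a sketch of dequantizing quint8 data with range [0.0, 6.0] (untested; same qualified imports as earlier):

```haskell
dequantized :: TF.Tensor TF.Build Float
dequantized = TFC.dequantize qs minRange maxRange
  where
    qs       = TFO.vector [0, 128, 255 :: Word8]  -- quantized inputs
    minRange = TFO.scalar (0 :: Float)
    maxRange = TFO.scalar (6 :: Float)
-- In the default MIN_COMBINED mode this yields approximately
-- [0.0, 3.01, 6.0] (each value times 6 / 255).
```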

dequantize'

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 Float

min_range: The minimum scalar value possibly produced for the input.

-> Tensor v'3 Float

max_range: The maximum scalar value possibly produced for the input.

-> Tensor Build Float

output

deserializeManySparse

Arguments

:: TensorType dtype 
=> Tensor v'1 ByteString

serialized_sparse: 2-D, The N serialized SparseTensor objects. + Must have 3 columns.

-> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)

(sparse_indices, sparse_values, sparse_shape)

  • sparse_indices
  • sparse_values
  • sparse_shape

Deserialize and concatenate SparseTensors from a serialized minibatch.

The input serialized_sparse must be a string matrix of shape `[N x 3]` where N is the minibatch size and the rows correspond to packed outputs of SerializeSparse. The ranks of the original SparseTensor objects must all match. When the final SparseTensor is created, it has rank one higher than the ranks of the incoming SparseTensor objects (they have been concatenated along a new row dimension).

The output SparseTensor object's shape values for all dimensions but the first are the max across the input SparseTensor objects' shape values for the corresponding dimensions. Its first shape value is N, the minibatch size.

The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run SparseReorder to restore index ordering.

For example, if the serialized input is a `[2 x 3]` matrix representing two + original SparseTensor objects:

index = [ 0]
        [10]
        [20]
values = [1, 2, 3]
shape = [50]

and

index = [ 2]
        [10]
values = [4, 5]
shape = [30]

then the final deserialized SparseTensor will be:

index = [0 0]
        [0 10]
        [0 20]
        [1 2]
        [1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]

deserializeManySparse'

Arguments

:: TensorType dtype 
=> OpParams 
-> Tensor v'1 ByteString

serialized_sparse: 2-D, The N serialized SparseTensor objects. + Must have 3 columns.

-> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)

(sparse_indices, sparse_values, sparse_shape)

  • sparse_indices
  • sparse_values
  • sparse_shape

destroyTemporaryVariable

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref t

ref: A reference to the temporary variable tensor.

-> m' (Tensor Value t)

value

Destroys the temporary variable and returns its final value.

Sets output to the value of the Tensor pointed to by ref, then destroys the temporary variable called var_name. All other uses of ref *must* have executed before this op. This is typically achieved by chaining the ref through each assign op, or by using control dependencies.

Outputs the final value of the tensor pointed to by ref.

destroyTemporaryVariable'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref t

ref: A reference to the temporary variable tensor.

-> m' (Tensor Value t)

value

diag

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

diagonal: Rank k tensor where k is at most 3.

-> Tensor Build t

output

Returns a diagonal tensor with given diagonal values.

Given a diagonal, this operation returns a tensor with the diagonal and everything else padded with zeros. The diagonal is computed as follows:

Assume diagonal has dimensions `[D1,..., Dk]`, then the output is a tensor of rank 2k with dimensions `[D1,..., Dk, D1,..., Dk]` where:

`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

For example:

```prettyprint
# diagonal is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0]
                       [0, 2, 0, 0]
                       [0, 0, 3, 0]
                       [0, 0, 0, 4]]
```
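The same example in this binding (a sketch; same qualified imports as earlier):

```haskell
diag4 :: TF.Tensor TF.Build Int32
diag4 = TFC.diag (TFO.vector [1, 2, 3, 4 :: Int32])
-- A [4, 4] matrix with 1, 2, 3, 4 on the diagonal, zeros elsewhere.
```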

diag'

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

diagonal: Rank k tensor where k is at most 3.

-> Tensor Build t

output

diagPart

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

input: Rank k tensor where k is 2, 4, or 6.

-> Tensor Build t

diagonal: The extracted diagonal.

Returns the diagonal part of the tensor.

This operation returns a tensor with the diagonal part of the input. The diagonal part is computed as follows:

Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a tensor of rank k with dimensions `[D1,..., Dk]` where:

`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

For example:

```prettyprint
# input is [[1, 0, 0, 0]
            [0, 2, 0, 0]
            [0, 0, 3, 0]
            [0, 0, 0, 4]]

tf.diag_part(input) ==> [1, 2, 3, 4]
```

diagPart'

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Rank k tensor where k is 2, 4, or 6.

-> Tensor Build t

diagonal: The extracted diagonal.

digamma

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes Psi, the derivative of Lgamma (the log of the absolute value of `Gamma(x)`), element-wise.

digamma'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

dilation2D

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v'2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor Build t

output: 4-D with shape `[batch, out_height, out_width, depth]`.

Computes the grayscale dilation of 4-D input and 3-D filter tensors.

The input tensor has shape `[batch, in_height, in_width, depth]` and the filter tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The output tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the padding algorithm. We currently only support the default NHWC data_format.

In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with conv2d, we use unmirrored filters):

output[b, y, x, c] =
    max_{dy, dx} input[b,
                       strides[1] * y + rates[1] * dy,
                       strides[2] * x + rates[2] * dx,
                       c] +
                 filter[dy, dx, c]

Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros.

Note on duality: The dilation of input by the filter is equal to the negation of the erosion of `-input` by the reflected filter.

dilation2D'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v'2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor Build t

output: 4-D with shape `[batch, out_height, out_width, depth]`.

dilation2DBackpropFilter

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v'2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

-> Tensor Build t

filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

Computes the gradient of morphological 2-D dilation with respect to the filter.

dilation2DBackpropFilter'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v'2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

-> Tensor Build t

filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

dilation2DBackpropInput

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v'2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

-> Tensor Build t

in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

Computes the gradient of morphological 2-D dilation with respect to the input.

dilation2DBackpropInput'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v'2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

-> Tensor Build t

in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.

div

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns x / y element-wise.

*NOTE*: Div supports broadcasting. More about broadcasting here.

div'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

drawBoundingBoxes

Arguments

:: OneOf `[Word16, Float]` t 
=> Tensor v'1 t

images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

-> Tensor v'2 Float

boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding + boxes.

-> Tensor Build t

output: 4-D with the same shape as images. The batch of input images with + bounding boxes drawn on the images.

Draw bounding boxes on a batch of images.

Outputs a copy of images but draws on top of the pixels zero or more bounding boxes specified by the locations in boxes. The coordinates of each bounding box in boxes are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.

For example, if an image is 100 x 200 pixels and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the bounding box will be `(10, 40)` to `(50, 180)`.

Parts of the bounding box may fall outside the image.

drawBoundingBoxes'

Arguments

:: OneOf `[Word16, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

-> Tensor v'2 Float

boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding + boxes.

-> Tensor Build t

output: 4-D with the same shape as images. The batch of input images with + bounding boxes drawn on the images.

dynamicPartition

Arguments

:: TensorType t 
=> Int64

num_partitions: The number of partitions to output.

-> Tensor v'1 t

data

-> Tensor v'2 Int32

partitions: Any shape. Indices in the range `[0, num_partitions)`.

-> [Tensor Build t]

outputs

Partitions `data` into num_partitions tensors using indices from partitions.

For each index tuple js of size `partitions.ndim`, the slice `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` are placed in `outputs[i]` in lexicographic order of js, and the first dimension of `outputs[i]` is the number of entries in partitions equal to i. In detail,

```python
outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]

outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
```

`data.shape` must start with `partitions.shape`.

For example:

```python
# Scalar partitions.
partitions = 1
num_partitions = 2
data = [10, 20]
outputs[0] = []  # Empty with shape [0, 2]
outputs[1] = [[10, 20]]

# Vector partitions.
partitions = [0, 0, 1, 1, 0]
num_partitions = 2
data = [10, 20, 30, 40, 50]
outputs[0] = [10, 20, 50]
outputs[1] = [30, 40]
```

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/DynamicPartition.png" alt + /div

dynamicPartition'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

num_partitions: The number of partitions to output.

-> Tensor v'1 t

data

-> Tensor v'2 Int32

partitions: Any shape. Indices in the range `[0, num_partitions)`.

-> [Tensor Build t]

outputs

dynamicStitch

Arguments

:: TensorType t 
=> [Tensor v'1 Int32]

indices

-> [Tensor v'2 t]

data

-> Tensor Build t

merged

Interleave the values from the `data` tensors into a single tensor.

Builds a merged tensor such that

```python
merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```

For example, if each `indices[m]` is scalar or vector, we have

```python
# Scalar indices:
merged[indices[m], ...] = data[m][...]

# Vector indices:
merged[indices[m][i], ...] = data[m][i, ...]
```

Each `data[i].shape` must start with the corresponding `indices[i].shape`, and the rest of `data[i].shape` must be constant w.r.t. i. That is, we must have `data[i].shape = indices[i].shape + constant`. In terms of this constant, the output shape is

merged.shape = [max(indices)] + constant

Values are merged in order, so if an index appears in both `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the merged result.

For example:

```python
indices[0] = 6
indices[1] = [4, 1]
indices[2] = [[5, 2], [0, 3]]
data[0] = [61, 62]
data[1] = [[41, 42], [11, 12]]
data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
          [51, 52], [61, 62]]
```

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/DynamicStitch.png" alt + /div

dynamicStitch'

Arguments

:: TensorType t 
=> OpParams 
-> [Tensor v'1 Int32]

indices

-> [Tensor v'2 t]

data

-> Tensor Build t

merged

editDistance

Arguments

:: TensorType t 
=> Tensor v'1 Int64

hypothesis_indices: The indices of the hypothesis list SparseTensor. + This is an N x R int64 matrix.

-> Tensor v'2 t

hypothesis_values: The values of the hypothesis list SparseTensor. + This is an N-length vector.

-> Tensor v'3 Int64

hypothesis_shape: The shape of the hypothesis list SparseTensor. + This is an R-length vector.

-> Tensor v'4 Int64

truth_indices: The indices of the truth list SparseTensor. + This is an M x R int64 matrix.

-> Tensor v'5 t

truth_values: The values of the truth list SparseTensor. + This is an M-length vector.

-> Tensor v'6 Int64

truth_shape: truth indices, vector.

-> Tensor Build Float

output: A dense float tensor with rank R - 1.

For the example input:

// hypothesis represents a 2x1 matrix with variable-length values:
//   (0,0) = ["a"]
//   (1,0) = ["b"]
hypothesis_indices = [[0, 0, 0],
                      [1, 0, 0]]
hypothesis_values = ["a", "b"]
hypothesis_shape = [2, 1, 1]

// truth represents a 2x2 matrix with variable-length values:
//   (0,0) = []
//   (0,1) = ["a"]
//   (1,0) = ["b", "c"]
//   (1,1) = ["a"]
truth_indices = [[0, 1, 0],
                 [1, 0, 0],
                 [1, 0, 1],
                 [1, 1, 0]]
truth_values = ["a", "b", "c", "a"]
truth_shape = [2, 2, 2]
normalize = true

The output will be:

// output is a 2x2 matrix with edit distances normalized by truth lengths.
output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
          [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis

Computes the (possibly normalized) Levenshtein Edit Distance.

The inputs are variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape) and (truth_indices, truth_values, truth_shape).

The inputs are:

editDistance'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int64

hypothesis_indices: The indices of the hypothesis list SparseTensor. + This is an N x R int64 matrix.

-> Tensor v'2 t

hypothesis_values: The values of the hypothesis list SparseTensor. + This is an N-length vector.

-> Tensor v'3 Int64

hypothesis_shape: The shape of the hypothesis list SparseTensor. + This is an R-length vector.

-> Tensor v'4 Int64

truth_indices: The indices of the truth list SparseTensor. + This is an M x R int64 matrix.

-> Tensor v'5 t

truth_values: The values of the truth list SparseTensor. + This is an M-length vector.

-> Tensor v'6 Int64

truth_shape: truth indices, vector.

-> Tensor Build Float

output: A dense float tensor with rank R - 1.

For the example input:

// hypothesis represents a 2x1 matrix with variable-length values:
//   (0,0) = ["a"]
//   (1,0) = ["b"]
hypothesis_indices = [[0, 0, 0],
                      [1, 0, 0]]
hypothesis_values = ["a", "b"]
hypothesis_shape = [2, 1, 1]

// truth represents a 2x2 matrix with variable-length values:
//   (0,0) = []
//   (0,1) = ["a"]
//   (1,0) = ["b", "c"]
//   (1,1) = ["a"]
truth_indices = [[0, 1, 0],
                 [1, 0, 0],
                 [1, 0, 1],
                 [1, 1, 0]]
truth_values = ["a", "b", "c", "a"]
truth_shape = [2, 2, 2]
normalize = true

The output will be:

// output is a 2x2 matrix with edit distances normalized by truth lengths.
output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
          [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis

elu

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

features

-> Tensor Build t

activations

Computes exponential linear: `exp(features) - 1` if `features < 0`, features otherwise.

See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)

elu'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

features

-> Tensor Build t

activations

eluGrad

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding Elu operation.

-> Tensor v'2 t

outputs: The outputs of the corresponding Elu operation.

-> Tensor Build t

backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, + gradients otherwise.

Computes gradients for the exponential linear (Elu) operation.

eluGrad'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding Elu operation.

-> Tensor v'2 t

outputs: The outputs of the corresponding Elu operation.

-> Tensor Build t

backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, + gradients otherwise.

encodeBase64

Arguments

:: Tensor v'1 ByteString

input: Strings to be encoded.

-> Tensor Build ByteString

output: Input strings encoded in base64.

Encode strings into web-safe base64 format.

Refer to the following article for more information on base64 format: https://en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the end so that the encoded string has a length that is a multiple of 4. See the Padding section of the link above.

Web-safe means that the encoder uses - and _ instead of + and /.
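A round-trip sketch (untested; same qualified imports as earlier, and assuming the matching decodeBase64 op generated in this module):

```haskell
roundTrip :: TF.Tensor TF.Build BS.ByteString
roundTrip =
    TFC.decodeBase64 (TFC.encodeBase64 (TFO.scalar ("hello" :: BS.ByteString)))
-- Evaluates back to "hello"; the intermediate encoding uses '-' and
-- '_' rather than '+' and '/'.
```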

encodeBase64'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

input: Strings to be encoded.

-> Tensor Build ByteString

output: Input strings encoded in base64.

encodeJpeg

Arguments

:: Tensor v'1 Word8

image: 3-D with shape `[height, width, channels]`.

-> Tensor Build ByteString

contents: 0-D. JPEG-encoded image.

JPEG-encode an image.

image is a 3-D uint8 Tensor of shape `[height, width, channels]`.

The attr format can be used to override the color format of the encoded output. Values can be:

  • `''`: Use a default format based on the number of channels in the image.
  • grayscale: Output a grayscale JPEG image. The channels dimension of image must be 1.
  • rgb: Output an RGB JPEG image. The channels dimension of image must be 3.

If format is not specified or is the empty string, a default format is picked based on the number of channels in image:

  • 1: Output a grayscale image.
  • 3: Output an RGB image.

encodeJpeg'

Arguments

:: OpParams 
-> Tensor v'1 Word8

image: 3-D with shape `[height, width, channels]`.

-> Tensor Build ByteString

contents: 0-D. JPEG-encoded image.

encodePng

Arguments

:: OneOf `[Word16, Word8]` t 
=> Tensor v'1 t

image: 3-D with shape `[height, width, channels]`.

-> Tensor Build ByteString

contents: 0-D. PNG-encoded image.

PNG-encode an image.

image is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where channels is:

  • 1: for grayscale.
  • 2: for grayscale + alpha.
  • 3: for RGB.
  • 4: for RGBA.

The ZLIB compression level, compression, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower.

encodePng'

Arguments

:: OneOf `[Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 t

image: 3-D with shape `[height, width, channels]`.

-> Tensor Build ByteString

contents: 0-D. PNG-encoded image.

enter

Arguments

:: TensorType t 
=> Tensor v'1 t

data: The tensor to be made available to the child frame.

-> Tensor Build t

output: The same tensor as `data`.

Creates or finds a child frame, and makes `data` available to the child frame.

This op is used together with Exit to create loops in the graph. The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations are run in parallel in the child frame.

enter'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

data: The tensor to be made available to the child frame.

-> Tensor Build t

output: The same tensor as `data`.

equal

Returns the truth value of (x == y) element-wise.

*NOTE*: Equal supports broadcasting. More about broadcasting here.

erf

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes the Gauss error function of x element-wise.

erf'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

erfc

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes the complementary error function of x element-wise.

erfc'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

exit

Arguments

:: TensorType t 
=> Tensor v'1 t

data: The tensor to be made available to the parent frame.

-> Tensor Build t

output: The same tensor as `data`.

Exits the current frame to its parent frame.

Exit makes its input `data` available to the parent frame.

exit'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

data: The tensor to be made available to the parent frame.

-> Tensor Build t

output: The same tensor as `data`.

exp

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes exponential of x element-wise. \(y = e^x\).

exp'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

expandDims

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tdim) 
=> Tensor v'1 t

input

-> Tensor v'2 tdim

dim: 0-D (scalar). Specifies the dimension index at which to + expand the shape of input.

-> Tensor Build t

output: Contains the same data as input, but its shape has an additional + dimension of size 1 added.

Inserts a dimension of 1 into a tensor's shape.

Given a tensor input, this operation inserts a dimension of 1 at the dimension index dim of input's shape. The dimension index dim starts at zero; if you specify a negative number for dim it is counted backward from the end.

This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`.

Other examples:

```prettyprint
# t is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]

# t2 is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```

This operation requires that:

`-1-input.dims() <= dim <= input.dims()`

This operation is related to `squeeze()`, which removes dimensions of size 1.
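The single-image case from above, as an untested sketch (same qualified imports as earlier; TFO.zeros is assumed from TensorFlow.Ops):

```haskell
batched :: TF.Tensor TF.Build Float
batched = TFC.expandDims image (TFO.scalar (0 :: Int32))
  where
    -- One [height, width, channels] image of zeros...
    image = TFO.zeros (TF.Shape [28, 28, 3])
-- ...becomes a batch of one, shape [1, 28, 28, 3].
```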

expandDims'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tdim) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 tdim

dim: 0-D (scalar). Specifies the dimension index at which to + expand the shape of input.

-> Tensor Build t

output: Contains the same data as input, but its shape has an additional + dimension of size 1 added.

expm1

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes exponential of x - 1 element-wise.

I.e., \(y = (exp x) - 1\).

expm1'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

extractGlimpse

Arguments

:: Tensor v'1 Float

input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, followed by the glimpse width.

-> Tensor v'3 Float

offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing + the x, y locations of the center of each window.

-> Tensor Build Float

glimpse: A tensor representing the glimpses `[batch_size, + glimpse_height, glimpse_width, channels]`.

Extracts a glimpse from the input tensor.

Returns a set of windows called glimpses extracted at location offsets from the input tensor. If the windows only partially overlap the inputs, the non-overlapping areas will be filled with random noise.

The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the size parameter.

The arguments normalized and centered control how the windows are built:

  • If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension.
  • If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0).
  • If the coordinates are not normalized they are interpreted as numbers of pixels.

extractGlimpse'

Arguments

:: OpParams 
-> Tensor v'1 Float

input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D tensor of 2 elements containing the size of the glimpses + to extract. The glimpse height must be specified first, following + by the glimpse width.

-> Tensor v'3 Float

offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing + the x, y locations of the center of each window.

-> Tensor Build Float

glimpse: A tensor representing the glimpses `[batch_size, + glimpse_height, glimpse_width, channels]`.

extractImagePatches

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

-> Tensor Build t

patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * + ksize_cols * depth]` containing image patches with size + `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.

Extract patches from images and put them in the "depth" output dimension.

extractImagePatches'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.

-> Tensor Build t

patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * + ksize_cols * depth]` containing image patches with size + `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.

fFT

Arguments

:: Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its 1D Fourier Transform.

Compute the 1-dimensional discrete Fourier Transform over the inner-most dimension of input.

fFT'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its 1D Fourier Transform.

fFT2D

Arguments

:: Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their 2D Fourier Transform.

Compatibility (numpy): equivalent to np.fft2.

Compute the 2-dimensional discrete Fourier Transform over the inner-most 2 dimensions of input.

fFT2D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their 2D Fourier Transform.

Compatibility (numpy): equivalent to np.fft2.

fFT3D

Arguments

:: Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their 3D Fourier Transform.

Compatibility (numpy): equivalent to np.fft3.

Compute the 3-dimensional discrete Fourier Transform over the inner-most 3 dimensions of input.

fFT3D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their 3D Fourier Transform.

Compatibility (numpy): equivalent to np.fft3.

fIFOQueue

Arguments

:: MonadBuild m' 
=> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that produces elements in first-in first-out order.

fIFOQueue'

Arguments

:: MonadBuild m' 
=> OpParams 
-> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the queue.

fIFOQueueV2

Arguments

:: MonadBuild m' 
=> [DataType]

component_types: The type of each component in a value.

-> m' ResourceHandle

handle: The handle to the queue.

A queue that produces elements in first-in first-out order.

fIFOQueueV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> [DataType]

component_types: The type of each component in a value.

-> m' ResourceHandle

handle: The handle to the queue.

fact

Arguments

:: Tensor Build ByteString

fact

Output a fact about factorials.

fact'

Arguments

:: OpParams 
-> Tensor Build ByteString

fact

fakeQuantWithMinMaxArgs

Arguments

:: Tensor v'1 Float

inputs

-> Tensor Build Float

outputs

Fake-quantize the inputs tensor, type float to outputs tensor of same type.

Attributes `[min; max]` define the clamping range for the inputs data. Op divides this range into 255 steps (total of 256 values), then replaces each inputs value with the closest of the quantized step values.

Quantization is called fake since the output is still in floating point.
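A sketch (untested; same qualified imports as earlier), relying on the upstream defaults min = -6 and max = 6:

```haskell
fakeQuantized :: TF.Tensor TF.Build Float
fakeQuantized = TFC.fakeQuantWithMinMaxArgs (TFO.vector [-8, 0.3, 7 :: Float])
-- Values are clamped into [-6, 6] and snapped to the nearest of the
-- 256 quantized steps, but stay Float.
```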

fakeQuantWithMinMaxArgs'

Arguments

:: OpParams 
-> Tensor v'1 Float

inputs

-> Tensor Build Float

outputs

fakeQuantWithMinMaxArgsGradient

Arguments

:: Tensor v'1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.

-> Tensor v'2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.

-> Tensor Build Float

backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: + `gradients * (inputs >= min && inputs <= max)`.

Compute gradients for a FakeQuantWithMinMaxArgs operation.

fakeQuantWithMinMaxArgsGradient'

Arguments

:: OpParams 
-> Tensor v'1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.

-> Tensor v'2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.

-> Tensor Build Float

backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: + `gradients * (inputs >= min && inputs <= max)`.

fakeQuantWithMinMaxVars

Arguments

:: Tensor v'1 Float

inputs

-> Tensor v'2 Float

min

-> Tensor v'3 Float

max

-> Tensor Build Float

outputs

Fake-quantize the inputs tensor of type float and shape `[b, h, w, d]` via global float scalars min and max to outputs tensor of same shape as inputs.

`[min; max]` define the clamping range for the inputs data. Op divides this range into 255 steps (total of 256 values), then replaces each inputs value with the closest of the quantized step values.

This operation has a gradient and thus allows for training min and max values.

fakeQuantWithMinMaxVars'

Arguments

:: OpParams 
-> Tensor v'1 Float

inputs

-> Tensor v'2 Float

min

-> Tensor v'3 Float

max

-> Tensor Build Float

outputs

fakeQuantWithMinMaxVarsGradient

Arguments

:: Tensor v'1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.

-> Tensor v'2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. + min, max: Quantization interval, scalar floats.

-> Tensor v'3 Float

min

-> Tensor v'4 Float

max

-> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

(backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

  • backprops_wrt_input: Backpropagated gradients w.r.t. inputs: + `gradients * (inputs >= min && inputs <= max)`.
  • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: + `sum(gradients * (inputs < min))`.
  • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: + `sum(gradients * (inputs > max))`.

Compute gradients for a FakeQuantWithMinMaxVars operation.

fakeQuantWithMinMaxVarsGradient'

Arguments

:: OpParams 
-> Tensor v'1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.

-> Tensor v'2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. + min, max: Quantization interval, scalar floats.

-> Tensor v'3 Float

min

-> Tensor v'4 Float

max

-> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

(backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

  • backprops_wrt_input: Backpropagated gradients w.r.t. inputs: + `gradients * (inputs >= min && inputs <= max)`.
  • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: + `sum(gradients * (inputs < min))`.
  • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: + `sum(gradients * (inputs > max))`.

fakeQuantWithMinMaxVarsPerChannel

Arguments

:: Tensor v'1 Float

inputs

-> Tensor v'2 Float

min

-> Tensor v'3 Float

max

-> Tensor Build Float

outputs

Fake-quantize the inputs tensor of type float and one of the shapes: `[d]`, `[b, d]`, `[b, h, w, d]` via per-channel floats min and max of shape `[d]` to outputs tensor of same shape as inputs.

`[min; max]` define the clamping range for the inputs data in the corresponding depth channel. Op divides this range into 255 steps (total of 256 values), then replaces each inputs value with the closest of the quantized step values.

This operation has a gradient and thus allows for training min and max values.

fakeQuantWithMinMaxVarsPerChannel'

Arguments

:: OpParams 
-> Tensor v'1 Float

inputs

-> Tensor v'2 Float

min

-> Tensor v'3 Float

max

-> Tensor Build Float

outputs

fakeQuantWithMinMaxVarsPerChannelGradient

Arguments

:: Tensor v'1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, + shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.

-> Tensor v'2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape + same as gradients. + min, max: Quantization interval, floats of shape `[d]`.

-> Tensor v'3 Float

min

-> Tensor v'4 Float

max

-> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

(backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

  • backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as + inputs: + `gradients * (inputs >= min && inputs <= max)`.
  • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: + `sum_per_d(gradients * (inputs < min))`.
  • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`: + `sum_per_d(gradients * (inputs > max))`.

Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.

fakeQuantWithMinMaxVarsPerChannelGradient'

Arguments

:: OpParams 
-> Tensor v'1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, + shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.

-> Tensor v'2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape + same as gradients. + min, max: Quantization interval, floats of shape `[d]`.

-> Tensor v'3 Float

min

-> Tensor v'4 Float

max

-> (Tensor Build Float, Tensor Build Float, Tensor Build Float)

(backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

  • backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as + inputs: + `gradients * (inputs >= min && inputs <= max)`.
  • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: + `sum_per_d(gradients * (inputs < min))`.
  • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`: + `sum_per_d(gradients * (inputs > max))`.

fakeQueue

Arguments

:: MonadBuild m' 
=> ResourceHandle

resource

-> m' (Tensor Ref ByteString)

handle

Deprecated. Do not use.

fakeQueue'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

resource

-> m' (Tensor Ref ByteString)

handle

fill

Arguments

:: TensorType t 
=> Tensor v'1 Int32

dims: 1-D. Represents the shape of the output tensor.

-> Tensor v'2 t

value: 0-D (scalar). Value to fill the returned tensor.

Compatibility (numpy): equivalent to np.full.

-> Tensor Build t

output

Creates a tensor filled with a scalar value.

This operation creates a tensor of shape dims and fills it with value.

For example:

```prettyprint
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
                     [9, 9, 9]]
```
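The same example in this binding (a sketch; same qualified imports as earlier):

```haskell
nines :: TF.Tensor TF.Build Int32
nines = TFC.fill (TFO.vector [2, 3 :: Int32]) (TFO.scalar (9 :: Int32))
-- A [2, 3] tensor in which every element is 9.
```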

fill'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int32

dims: 1-D. Represents the shape of the output tensor.

-> Tensor v'2 t

value: 0-D (scalar). Value to fill the returned tensor.

Compatibility (numpy): equivalent to np.full.

-> Tensor Build t

output

fixedLengthRecordReader

Arguments

:: MonadBuild m' 
=> Int64

record_bytes

-> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs fixed-length records from a file.

fixedLengthRecordReader'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Int64

record_bytes

-> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

fixedLengthRecordReaderV2

Arguments

:: MonadBuild m' 
=> Int64

record_bytes

-> m' ResourceHandle

reader_handle: The handle to reference the Reader.

A Reader that outputs fixed-length records from a file.

fixedLengthRecordReaderV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Int64

record_bytes

-> m' ResourceHandle

reader_handle: The handle to reference the Reader.

fixedUnigramCandidateSampler

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled + candidates in a batch are unique. This requires some approximation to + estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the + IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is + the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing + the number of times each candidate is expected to occur in a batch + of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled + candidate representing the number of times the candidate is expected + to occur in a batch of sampled candidates. If unique=true, then this is a + probability.

Generates labels for candidate sampling with a learned unigram distribution.

A unigram sampler could use a fixed unigram distribution read from a file or passed in as an in-memory array instead of building up the distribution from data on the fly. There is also an option to skew the distribution by applying a distortion power to the weights.

The vocabulary file should be in CSV-like format, with the last field being the weight associated with the word.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

fixedUnigramCandidateSampler'

Arguments

:: OpParams 
-> Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

floor

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Returns element-wise largest integer not greater than x.

floor'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

floorDiv

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns x // y element-wise.

  • *NOTE*: FloorDiv supports broadcasting. More about broadcasting here.

floorMod

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns element-wise remainder of division.

When `x < 0` xor `y < 0` is true, this follows Python semantics in that the result here is consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.

  • *NOTE*: FloorMod supports broadcasting. More about broadcasting here.
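To see the Python-style sign convention from the identity above, here is a minimal sketch (assuming the session API from TensorFlow.Core and the vector helper from TensorFlow.Ops; expected output in the comment):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = do
  -- The result takes the sign of the divisor, as in Python: -7 `mod` 3 == 2.
  result <- TF.runSession . TF.run $
      CoreOps.floorMod (TF.vector [7, -7 :: Int32]) (TF.vector [3, 3 :: Int32])
  print (result :: V.Vector Int32)  -- expected: fromList [1,2]
```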

floorMod'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

fractionalAvgPool

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

(output, row_pooling_sequence, col_pooling_sequence)

  • output: output tensor after fractional avg pooling.
  • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
  • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

Performs fractional average pooling on the input.

Fractional average pooling is similar to fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region.

fractionalAvgPool'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

(output, row_pooling_sequence, col_pooling_sequence)

  • output: output tensor after fractional avg pooling.
  • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
  • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

fractionalAvgPoolGrad

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> Tensor v'1 Int64

orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

-> Tensor v'2 t

out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_avg_pool.

-> Tensor v'3 Int64

row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

-> Tensor v'4 Int64

col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

-> Tensor Build t

output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

Computes gradient of the FractionalAvgPool function.

Unlike FractionalMaxPoolGrad, we don't need to find arg_max for FractionalAvgPoolGrad; we just need to evenly back-propagate each element of out_backprop to those indices that form the same pooling cell. Therefore, we only need to know the shape of the original input tensor, instead of the whole tensor.

fractionalAvgPoolGrad'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

orig_input_tensor_shape: Original input tensor shape for fractional_avg_pool

-> Tensor v'2 t

out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_avg_pool.

-> Tensor v'3 Int64

row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

-> Tensor v'4 Int64

col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

-> Tensor Build t

output: 4-D. Gradients w.r.t. the input of fractional_avg_pool.

fractionalMaxPool

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

(output, row_pooling_sequence, col_pooling_sequence)

  • output: output tensor after fractional max pooling.
  • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
  • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

Performs fractional max pooling on the input.

Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer.

The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries.

First we define the following:

  1. input_row_length : the number of rows from the input set
  2. output_row_length : which will be smaller than the input
  3. alpha = input_row_length / output_row_length : our reduction ratio
  4. K = floor(alpha)
  5. row_pooling_sequence : this is the result list of pool boundary rows

Then, row_pooling_sequence should satisfy:

  1. a[0] = 0 : the first value of the sequence is 0
  2. a[end] = input_row_length : the last value of the sequence is the size
  3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4. length(row_pooling_sequence) = output_row_length+1

For more details on fractional max pooling, see this paper: Benjamin Graham, Fractional Max-Pooling.

fractionalMaxPool'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> (Tensor Build t, Tensor Build Int64, Tensor Build Int64)

(output, row_pooling_sequence, col_pooling_sequence)

  • output: output tensor after fractional max pooling.
  • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
  • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

fractionalMaxPoolGrad

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

orig_input: Original input for fractional_max_pool

-> Tensor v'2 t

orig_output: Original output for fractional_max_pool

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_max_pool.

-> Tensor v'4 Int64

row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

-> Tensor v'5 Int64

col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

-> Tensor Build t

output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

Computes gradient of the FractionalMaxPool function.

fractionalMaxPoolGrad'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

orig_input: Original input for fractional_max_pool

-> Tensor v'2 t

orig_output: Original output for fractional_max_pool

-> Tensor v'3 t

out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients + w.r.t. the output of fractional_max_pool.

-> Tensor v'4 Int64

row_pooling_sequence: row pooling sequence, form pooling region with + col_pooling_sequence.

-> Tensor v'5 Int64

col_pooling_sequence: column pooling sequence, form pooling region with + row_pooling sequence.

-> Tensor Build t

output: 4-D. Gradients w.r.t. the input of fractional_max_pool.

fusedBatchNorm

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x: A 4D Tensor for input data.

-> Tensor v'2 t

scale: A 1D Tensor for scaling factor, to scale the normalized x.

-> Tensor v'3 t

offset: A 1D Tensor for offset, to shift to the normalized x.

-> Tensor v'4 t

mean: A 1D Tensor for population mean. Used for inference only; + must be empty for training.

-> Tensor v'5 t

variance: A 1D Tensor for population variance. Used for inference only; + must be empty for training.

-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

(y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)

  • y: A 4D Tensor for output data.
  • batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow + to compute the running mean.
  • batch_variance: A 1D Tensor for the computed batch variance, to be used by + TensorFlow to compute the running variance.
  • reserve_space_1: A 1D Tensor for the computed batch mean, to be reused + in the gradient computation.
  • reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance + in the cuDNN case), to be used in the gradient computation.

Batch normalization.

Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.

fusedBatchNorm'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x: A 4D Tensor for input data.

-> Tensor v'2 t

scale: A 1D Tensor for scaling factor, to scale the normalized x.

-> Tensor v'3 t

offset: A 1D Tensor for offset, to shift to the normalized x.

-> Tensor v'4 t

mean: A 1D Tensor for population mean. Used for inference only; + must be empty for training.

-> Tensor v'5 t

variance: A 1D Tensor for population variance. Used for inference only; + must be empty for training.

-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

(y, batch_mean, batch_variance, reserve_space_1, reserve_space_2)

  • y: A 4D Tensor for output data.
  • batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow + to compute the running mean.
  • batch_variance: A 1D Tensor for the computed batch variance, to be used by + TensorFlow to compute the running variance.
  • reserve_space_1: A 1D Tensor for the computed batch mean, to be reused + in the gradient computation.
  • reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance + in the cuDNN case), to be used in the gradient computation.

fusedBatchNormGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

y_backprop: A 4D Tensor for the gradient with respect to y.

-> Tensor v'2 t

x: A 4D Tensor for input data.

-> Tensor v'3 t

scale: A 1D Tensor for scaling factor, to scale the normalized x.

-> Tensor v'4 t

reserve_space_1: A 1D Tensor for the computed batch mean, to be reused + in the gradient computation.

-> Tensor v'5 t

reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance + in the cuDNN case), to be used in the gradient computation.

-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

(x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)

  • x_backprop: A 4D Tensor for the gradient with respect to x.
  • scale_backprop: A 1D Tensor for the gradient with respect to scale.
  • offset_backprop: A 1D Tensor for the gradient with respect to offset.
  • reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
  • reserve_space_4: Unused placeholder to match the variance input + in FusedBatchNorm.

Gradient for batch normalization.

Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.

fusedBatchNormGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

y_backprop: A 4D Tensor for the gradient with respect to y.

-> Tensor v'2 t

x: A 4D Tensor for input data.

-> Tensor v'3 t

scale: A 1D Tensor for scaling factor, to scale the normalized x.

-> Tensor v'4 t

reserve_space_1: A 1D Tensor for the computed batch mean, to be reused + in the gradient computation.

-> Tensor v'5 t

reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance + in the cuDNN case), to be used in the gradient computation.

-> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t)

(x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)

  • x_backprop: A 4D Tensor for the gradient with respect to x.
  • scale_backprop: A 1D Tensor for the gradient with respect to scale.
  • offset_backprop: A 1D Tensor for the gradient with respect to offset.
  • reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
  • reserve_space_4: Unused placeholder to match the variance input + in FusedBatchNorm.

fusedPadConv2D

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

-> Tensor v'3 t

filter: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor Build t

output

Performs a padding as a preprocess during a convolution.

Similar to FusedResizeAndPadConv2d, this op allows for an optimized implementation where the spatial padding transformation stage is fused with the im2col lookup, but in this case without the bilinear filtering required for resizing. Fusing the padding prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations.

The data_format attribute for Conv2D isn't supported by this op, and NHWC order is used instead.

Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage.

fusedPadConv2D'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

-> Tensor v'3 t

filter: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor Build t

output

fusedResizeAndPadConv2D

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

-> Tensor v'3 Int32

paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

-> Tensor v'4 t

filter: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor Build t

output

Performs a resize and padding as a preprocess during a convolution.

It's often possible to do spatial transformations more efficiently as part of the packing stage of a convolution, so this op allows for an optimized implementation where these stages are fused together. This prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations.

The data_format attribute for Conv2D isn't supported by this op, and defaults to NHWC order.

Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage.

fusedResizeAndPadConv2D'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + new size for the images.

-> Tensor v'3 Int32

paddings: A two-column matrix specifying the padding sizes. The number of + rows must be the same as the rank of input.

-> Tensor v'4 t

filter: 4-D with shape + `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor Build t

output

gather

Arguments

:: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 tparams

params

-> Tensor v'2 tindices

indices

-> Tensor Build tparams

output

Gather slices from params according to indices.

indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

```python
# Scalar indices
output[:, ..., :] = params[indices, :, ... :]

# Vector indices
output[i, :, ..., :] = params[indices[i], :, ... :]

# Higher rank indices
output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
```

If indices is a permutation and `len(indices) == params.shape[0]` then this operation will permute params accordingly.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/Gather.png" alt + /div

gather'

Arguments

:: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 tparams

params

-> Tensor v'2 tindices

indices

-> Tensor Build tparams

output

gatherNd

Arguments

:: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 tparams

params: `P-D`. The tensor from which to gather values.

-> Tensor v'2 tindices

indices: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.

-> Tensor Build tparams

output: `(P+Q-K-1)-D`. Values from params gathered from indices given by + indices.

Gather values or slices from params according to indices.

params is a Tensor of rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into params. It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of params.

Produces an output tensor with shape

```
[d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]].
```

Some examples below.

Simple indexing into a matrix:

```python
indices = [[0, 0], [1, 1]]
params = [[a, b], [c, d]]
output = [a, d]
```

Slice indexing into a matrix:

```python
indices = [[1], [0]]
params = [[a, b], [c, d]]
output = [[c, d], [a, b]]
```

Indexing into a 3-tensor:

```python
indices = [[1]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[[a1, b1], [c1, d1]]]

indices = [[0, 1], [1, 0]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[c0, d0], [a1, b1]]

indices = [[0, 0, 1], [1, 0, 1]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [b0, b1]
```

Batched indexing into a matrix:

```python
indices = [[[0, 0]], [[0, 1]]]
params = [[a, b], [c, d]]
output = [[a], [b]]
```

Batched slice indexing into a matrix:

```python
indices = [[[1]], [[0]]]
params = [[a, b], [c, d]]
output = [[[c, d]], [[a, b]]]
```

Batched indexing into a 3-tensor:

```python
indices = [[[1]], [[0]]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[[[a1, b1], [c1, d1]]],
          [[[a0, b0], [c0, d0]]]]

indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[[c0, d0], [a1, b1]],
          [[a0, b0], [c1, d1]]]

indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[b0, b1], [d0, c1]]
```

gatherNd'

Arguments

:: (TensorType tparams, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 tparams

params: `P-D`. The tensor from which to gather values.

-> Tensor v'2 tindices

indices: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.

-> Tensor Build tparams

output: `(P+Q-K-1)-D`. Values from params gathered from indices given by + indices.

getSessionHandle

Arguments

:: TensorType t 
=> Tensor v'1 t

value: The tensor to be stored.

-> Tensor Build ByteString

handle: The handle for the tensor stored in the session state.

Store the input tensor in the state of the current session.

getSessionHandle'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

value: The tensor to be stored.

-> Tensor Build ByteString

handle: The handle for the tensor stored in the session state.

getSessionTensor

Arguments

:: TensorType dtype 
=> Tensor v'1 ByteString

handle: The handle for a tensor stored in the session state.

-> Tensor Build dtype

value: The tensor for the given handle.

Get the value of the tensor specified by its handle.

getSessionTensor'

Arguments

:: TensorType dtype 
=> OpParams 
-> Tensor v'1 ByteString

handle: The handle for a tensor stored in the session state.

-> Tensor Build dtype

value: The tensor for the given handle.

greater

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

Returns the truth value of (x > y) element-wise.

  • NOTE*: Greater supports broadcasting. More about broadcasting + here

greater'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

greaterEqual

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

Returns the truth value of (x >= y) element-wise.

  • NOTE*: GreaterEqual supports broadcasting. More about broadcasting + here

greaterEqual'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

hSVToRGB

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

-> Tensor Build t

output: images converted to RGB.

Convert one or more images from HSV to RGB.

Outputs a tensor of the same shape as the images tensor, containing the RGB value of the pixels. The output is only well defined if the values in images are in `[0,1]`.

See rgb_to_hsv for a description of the HSV encoding.

hSVToRGB'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

-> Tensor Build t

output: images converted to RGB.

hashTable

Arguments

:: MonadBuild m' 
=> DataType

key_dtype: Type of the table keys.

-> DataType

value_dtype: Type of the table values.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

Creates a non-initialized hash table.

This op creates a hash table, specifying the type of its keys and values. Before using the table you will have to initialize it. After initialization the table will be immutable.
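A sketch of the full create/initialize/lookup workflow using this op together with initializeTable and lookupTableFind (documented below) may help. It is an illustration only, assuming the session API from TensorFlow.Core, the vector/scalar helpers from TensorFlow.Ops, and the DT_STRING/DT_INT64 enum constructors from the generated proto module:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Data.ByteString (ByteString)
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (scalar, vector)
import Proto.Tensorflow.Core.Framework.Types (DataType(DT_STRING, DT_INT64))

main :: IO ()
main = do
  result <- TF.runSession $ do
      -- Create the table, then initialize it once; afterwards it is immutable.
      table  <- CoreOps.hashTable DT_STRING DT_INT64
      initOp <- CoreOps.initializeTable table
                    (TF.vector ["one", "two" :: ByteString])
                    (TF.vector [1, 2 :: Int64])
      TF.run_ initOp
      -- Look up one present key and one missing key (-1 is the default value).
      values <- CoreOps.lookupTableFind table
                    (TF.vector ["two", "three" :: ByteString])
                    (TF.scalar (-1 :: Int64))
      TF.run values
  print (result :: V.Vector Int64)  -- expected: fromList [2,-1]
```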

hashTable'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

key_dtype: Type of the table keys.

-> DataType

value_dtype: Type of the table values.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

histogramSummary

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 ByteString

tag: Scalar. Tag to use for the Value.

-> Tensor v'2 t

values: Any shape. Values to use to build the histogram.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with a histogram.

The generated `Summary` has one summary value containing a histogram for values.

This op reports an InvalidArgument error if any value is not finite.

histogramSummary'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 ByteString

tag: Scalar. Tag to use for the Value.

-> Tensor v'2 t

values: Any shape. Values to use to build the histogram.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

iFFT

Arguments

:: Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its inverse 1D Fourier Transform.

Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most dimension of input.

iFFT'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most + dimension of input is replaced with its inverse 1D Fourier Transform.

iFFT2D

Arguments

:: Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their inverse 2D Fourier Transform.

Equivalent to numpy's np.ifft2.

Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most 2 dimensions of input.

iFFT2D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 2 + dimensions of input are replaced with their inverse 2D Fourier Transform.

Equivalent to numpy's np.ifft2.

iFFT3D

Arguments

:: Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their inverse 3D Fourier Transform.

Equivalent to numpy's np.fft.ifftn on the inner-most 3 dimensions.

Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most 3 dimensions of input.

iFFT3D'

Arguments

:: OpParams 
-> Tensor v'1 (Complex Float)

input: A complex64 tensor.

-> Tensor Build (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 3 + dimensions of input are replaced with their inverse 3D Fourier Transform.

Equivalent to numpy's np.fft.ifftn on the inner-most 3 dimensions.

identity

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor Build t

output

Return a tensor with the same shape and contents as the input tensor or value.

identity'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

identityReader

Arguments

:: MonadBuild m' 
=> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the queued work as both the key and value.

To use, enqueue strings in a Queue. ReaderRead will take the front work string and output (work, work).

identityReader'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

identityReaderV2

Arguments

:: MonadBuild m' 
=> m' ResourceHandle

reader_handle: The handle to reference the Reader.

A Reader that outputs the queued work as both the key and value.

To use, enqueue strings in a Queue. ReaderRead will take the front work string and output (work, work).

identityReaderV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' ResourceHandle

reader_handle: The handle to reference the Reader.

igamma

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

a

-> Tensor v'2 t

x

-> Tensor Build t

z

Compute the lower regularized incomplete Gamma function `P(a, x)`.

The lower regularized incomplete Gamma function is defined as:

```
P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
```

where

```
gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt
```

is the lower incomplete Gamma function.

Note: above, `Q(a, x)` (Igammac) is the upper regularized incomplete Gamma function.

igamma'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

a

-> Tensor v'2 t

x

-> Tensor Build t

z

igammac

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

a

-> Tensor v'2 t

x

-> Tensor Build t

z

Compute the upper regularized incomplete Gamma function `Q(a, x)`.

The upper regularized incomplete Gamma function is defined as:

```
Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)
```

where

```
Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt
```

is the upper incomplete Gamma function.

Note: above, `P(a, x)` (Igamma) is the lower regularized incomplete Gamma function.

igammac'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

a

-> Tensor v'2 t

x

-> Tensor Build t

z

imag

Arguments

:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
=> Tensor v'1 t

input

-> Tensor Build tout

output

Returns the imaginary part of a complex number.

Given a tensor input of complex numbers, this operation returns a tensor of type float that is the imaginary part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part returned by this operation.

For example:

```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```

imag'

Arguments

:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build tout

output

imageSummary

Arguments

:: OneOf `[Word16, Word8, Float]` t 
=> Tensor v'1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v'2 t

tensor: 4-D of shape `[batch_size, height, width, channels]` where + channels is 1, 3, or 4.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with images.

The summary has up to max_images summary values containing images. The images are built from tensor which must be 4-D with shape `[batch_size, height, width, channels]` and where channels can be:

  • 1: tensor is interpreted as Grayscale.
  • 3: tensor is interpreted as RGB.
  • 4: tensor is interpreted as RGBA.

The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. uint8 values are unchanged. The op uses two different normalization algorithms:

  • If the input values are all positive, they are rescaled so the largest one is 255.
  • If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255.

The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

  • If max_images is 1, the summary value tag is '*tag*/image'.
  • If max_images is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.

The bad_color argument is the color to use in the generated images for non-finite input values. It is a uint8 1-D tensor of length channels. Each element must be in the range `[0, 255]` (it represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color red.

imageSummary'

Arguments

:: OneOf `[Word16, Word8, Float]` t 
=> OpParams 
-> Tensor v'1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v'2 t

tensor: 4-D of shape `[batch_size, height, width, channels]` where + channels is 1, 3, or 4.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

immutableConst

Arguments

:: TensorType dtype 
=> Shape

shape: Shape of the returned tensor.

-> Tensor Build dtype

tensor

Returns immutable tensor from memory region.

The current implementation memmaps the tensor from a file.

immutableConst'

Arguments

:: TensorType dtype 
=> OpParams 
-> Shape

shape: Shape of the returned tensor.

-> Tensor Build dtype

tensor

inTopK

Arguments

:: OneOf `[Int32, Int64]` t 
=> Int64

k: Number of top elements to look at for computing precision.

-> Tensor v'1 Float

predictions: A batch_size x classes tensor.

-> Tensor v'2 t

targets: A batch_size vector of class ids.

-> Tensor Build Bool

precision: Computed Precision at k as a `bool Tensor`.

Says whether the targets are in the top K predictions.

This outputs a batch_size bool array; an entry `out[i]` is true if the prediction for the target class is among the top k predictions among all predictions for example i. Note that the behavior of InTopK differs from the TopK op in its handling of ties; if multiple classes have the same prediction value and straddle the top-k boundary, all of those classes are considered to be in the top k.

More formally, let \(predictions_i\) be the predictions for all classes for example i, \(targets_i\) be the target class for example i, and \(out_i\) be the output for example i. Then

$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
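A minimal sketch of this op through these bindings (assuming the session API from TensorFlow.Core and the constant/vector helpers from TensorFlow.Ops; expected output in the comment):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant, vector)

main :: IO ()
main = do
  -- One example with three classes; class 2 is among the top k=2 scores.
  let predictions = TF.constant (TF.Shape [1, 3]) [0.1, 0.8, 0.4 :: Float]
      targets     = TF.vector [2 :: Int32]
  result <- TF.runSession . TF.run $ CoreOps.inTopK 2 predictions targets
  print (result :: V.Vector Bool)  -- expected: fromList [True]
```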

inTopK'

Arguments

:: OneOf `[Int32, Int64]` t 
=> OpParams 
-> Int64

k: Number of top elements to look at for computing precision.

-> Tensor v'1 Float

predictions: A batch_size x classes tensor.

-> Tensor v'2 t

targets: A batch_size vector of class ids.

-> Tensor Build Bool

precision: Computed Precision at k as a `bool Tensor`.

initializeTable

Arguments

:: (MonadBuild m', TensorType tkey, TensorType tval) 
=> Tensor Ref ByteString

table_handle: Handle to a table which will be initialized.

-> Tensor v'2 tkey

keys: Keys of type Tkey.

-> Tensor v'3 tval

values: Values of type Tval.

-> m' ControlNode 

Table initializer that takes two tensors for keys and values respectively.

initializeTable'

Arguments

:: (MonadBuild m', TensorType tkey, TensorType tval) 
=> OpParams 
-> Tensor Ref ByteString

table_handle: Handle to a table which will be initialized.

-> Tensor v'2 tkey

keys: Keys of type Tkey.

-> Tensor v'3 tval

values: Values of type Tval.

-> m' ControlNode 

initializeTableFromTextFile

Arguments

:: MonadBuild m' 
=> Int64

key_index: Column index in a line to get the table key values from.

-> Int64

value_index: Column index that represents information of a line to get the table + value values from.

-> Tensor Ref ByteString

table_handle: Handle to a table which will be initialized.

-> Tensor v'2 ByteString

filename: Filename of a vocabulary text file.

-> m' ControlNode 

Initializes a table from a text file.

It inserts one key-value pair into the table for each line of the file. The key and value are extracted from the whole line content, from elements of the split line based on delimiter, or from the line number (starting from zero). Where to extract the key and value from a line is specified by key_index and value_index.

  • A value of -1 means use the line number (starting from zero); expects int64.
  • A value of -2 means use the whole line content; expects string.
  • A value >= 0 means use the index (starting at zero) of the split line based on delimiter.

initializeTableFromTextFile'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Int64

key_index: Column index in a line to get the table key values from.

-> Int64

value_index: Column index that represents information of a line to get the table + value values from.

-> Tensor Ref ByteString

table_handle: Handle to a table which will be initialized.

-> Tensor v'2 ByteString

filename: Filename of a vocabulary text file.

-> m' ControlNode 

inv

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes the reciprocal of x element-wise.

I.e., \(y = 1 / x\).

invGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Computes the gradient for the inverse of x wrt its input.

Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is the corresponding input gradient.

invGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

invertPermutation

Arguments

:: OneOf `[Int32, Int64]` t 
=> Tensor v'1 t

x: 1-D.

-> Tensor Build t

y: 1-D.

Computes the inverse permutation of a tensor.

This operation computes the inverse of an index permutation. It takes a 1-D integer tensor x, which represents the indices of a zero-based array, and swaps each value with its index position. In other words, for an output tensor y and an input tensor x, this operation computes the following:

`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`

The values must include 0. There can be no duplicate values or negative values.

For example:

```prettyprint
# tensor x is [3, 4, 0, 2, 1]
invert_permutation(x) ==> [2, 4, 3, 0, 1]
```
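The same example through these bindings, as a minimal sketch (assuming the session API from TensorFlow.Core and the vector helper from TensorFlow.Ops):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = do
  result <- TF.runSession . TF.run $
      CoreOps.invertPermutation (TF.vector [3, 4, 0, 2, 1 :: Int32])
  print (result :: V.Vector Int32)  -- expected: fromList [2,4,3,0,1]
```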

invertPermutation'

Arguments

:: OneOf `[Int32, Int64]` t 
=> OpParams 
-> Tensor v'1 t

x: 1-D.

-> Tensor Build t

y: 1-D.

isFinite

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build Bool

y

Returns which elements of x are finite.

Equivalent to numpy's np.isfinite.

isFinite'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build Bool

y

isInf

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build Bool

y

Returns which elements of x are Inf.

Equivalent to numpy's np.isinf.

isInf'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build Bool

y

isNan

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build Bool

y

Returns which elements of x are NaN.

Equivalent to numpy's np.isnan.

isNan'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build Bool

y

isVariableInitialized

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Tensor Ref dtype

ref: Should be from a Variable node. May be uninitialized.

-> m' (Tensor Value Bool)

is_initialized

Checks whether a tensor has been initialized.

Outputs a boolean scalar indicating whether the tensor has been initialized.

isVariableInitialized'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Tensor Ref dtype

ref: Should be from a Variable node. May be uninitialized.

-> m' (Tensor Value Bool)

is_initialized

l2Loss

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

t: Typically 2-D, but may have any dimensions.

-> Tensor Build t

output: 0-D.

L2 Loss.

Computes half the L2 norm of a tensor without the sqrt:

output = sum(t ** 2) / 2
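A minimal sketch of the formula above through these bindings (assuming the session API from TensorFlow.Core, the vector helper from TensorFlow.Ops, and the Scalar fetch type):

```haskell
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = do
  -- sum([3^2, 4^2]) / 2 == 12.5
  result <- TF.runSession . TF.run $ CoreOps.l2Loss (TF.vector [3, 4 :: Float])
  print (result :: TF.Scalar Float)  -- expected: Scalar 12.5
```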

l2Loss'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

t: Typically 2-D, but may have any dimensions.

-> Tensor Build t

output: 0-D.

lRN

Arguments

:: OneOf `[Word16, Float]` t 
=> Tensor v'1 t

input: 4-D.

-> Tensor Build t

output

Local Response Normalization.

The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius. In detail,

```
sqr_sum[a, b, c, d] =
    sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
output = input / (bias + alpha * sqr_sum) ** beta
```

For details, see Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012).

lRN'

Arguments

:: OneOf `[Word16, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D.

-> Tensor Build t

output

lRNGrad

Arguments

:: OneOf `[Word16, Float]` t 
=> Tensor v'1 t

input_grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 t

input_image: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'3 t

output_image: 4-D with shape `[batch, height, width, channels]`.

-> Tensor Build t

output: The gradients for LRN.

Gradients for Local Response Normalization.

lRNGrad'

Arguments

:: OneOf `[Word16, Float]` t 
=> OpParams 
-> Tensor v'1 t

input_grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 t

input_image: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'3 t

output_image: 4-D with shape `[batch, height, width, channels]`.

-> Tensor Build t

output: The gradients for LRN.

learnedUnigramCandidateSampler

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

Generates labels for candidate sampling with a learned unigram distribution.

See explanations of candidate sampling and the data formats at go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

learnedUnigramCandidateSampler'

Arguments

:: OpParams 
-> Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

less

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

Returns the truth value of (x < y) element-wise.

  • *NOTE*: Less supports broadcasting. More about broadcasting here.

less'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

lessEqual

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

Returns the truth value of (x <= y) element-wise.

  • *NOTE*: LessEqual supports broadcasting. More about broadcasting here.

lessEqual'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build Bool

z

lgamma

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes the log of the absolute value of `Gamma(x)` element-wise.

lgamma'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

linSpace

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

start: First entry in the range.

-> Tensor v'2 t

stop: Last entry in the range.

-> Tensor v'3 tidx

num: Number of values to generate.

-> Tensor Build t

output: 1-D. The generated values.

Generates values in an interval.

A sequence of num evenly-spaced values are generated beginning at start. If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`, so that the last one is exactly stop.

For example:

```
tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
```
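The same call through these bindings, as a minimal sketch (assuming the session API from TensorFlow.Core and the scalar helper from TensorFlow.Ops):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (scalar)

main :: IO ()
main = do
  result <- TF.runSession . TF.run $
      CoreOps.linSpace (TF.scalar (10 :: Float)) (TF.scalar 12)
                       (TF.scalar (3 :: Int32))
  print (result :: V.Vector Float)  -- expected: fromList [10.0,11.0,12.0]
```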

linSpace'

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

start: First entry in the range.

-> Tensor v'2 t

stop: Last entry in the range.

-> Tensor v'3 tidx

num: Number of values to generate.

-> Tensor Build t

output: 1-D. The generated values.

listDiff

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
=> Tensor v'1 t

x: 1-D. Values to keep.

-> Tensor v'2 t

y: 1-D. Values to remove.

-> (Tensor Build t, Tensor Build out_idx)

(out, idx)

  • out: 1-D. Values present in x but not in y.
  • idx: 1-D. Positions of x values preserved in out.

Computes the difference between two lists of numbers or strings.

Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). This operation also returns a list idx that represents the position of each out element in x. In other words:

`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`

For example, given this input:

```prettyprint
x = [1, 2, 3, 4, 5, 6]
y = [1, 3, 5]
```

This operation would return:

```prettyprint
out ==> [2, 4, 6]
idx ==> [1, 3, 5]
```
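The same example through these bindings, fetching both outputs of the tuple; a sketch only, assuming the session API from TensorFlow.Core and the vector helper from TensorFlow.Ops:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = do
  (out, idx) <- TF.runSession . TF.run $
      CoreOps.listDiff (TF.vector [1, 2, 3, 4, 5, 6 :: Int32])
                       (TF.vector [1, 3, 5 :: Int32])
  print (out :: V.Vector Int32)  -- expected: fromList [2,4,6]
  print (idx :: V.Vector Int32)  -- expected: fromList [1,3,5]
```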

listDiff'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
=> OpParams 
-> Tensor v'1 t

x: 1-D. Values to keep.

-> Tensor v'2 t

y: 1-D. Values to remove.

-> (Tensor Build t, Tensor Build out_idx)

(out, idx)

  • out: 1-D. Values present in x but not in y.
  • idx: 1-D. Positions of x values preserved in out.

log

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes natural logarithm of x element-wise.

I.e., \(y = log_e x\).

log'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

log1p

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes natural logarithm of (1 + x) element-wise.

I.e., \(y = log_e (1 + x)\).

log1p'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

logSoftmax

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

logits: 2-D with shape `[batch_size, num_classes]`.

-> Tensor Build t

logsoftmax: Same shape as logits.

Computes log softmax activations.

For each batch i and class j we have

logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
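A quick numeric check of the formula through these bindings (a sketch, assuming the session API from TensorFlow.Core and the constant helper from TensorFlow.Ops):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = do
  -- Equal logits give each class probability 1/2, so log-softmax is log 0.5.
  result <- TF.runSession . TF.run $
      CoreOps.logSoftmax (TF.constant (TF.Shape [1, 2]) [0, 0 :: Float])
  print (result :: V.Vector Float)  -- expected: twice ~(-0.6931472)
```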

logSoftmax'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

logits: 2-D with shape `[batch_size, num_classes]`.

-> Tensor Build t

logsoftmax: Same shape as logits.

logUniformCandidateSampler

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

Generates labels for candidate sampling with a log-uniform distribution.

See explanations of candidate sampling and the data formats at go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

logUniformCandidateSampler'

Arguments

:: OpParams 
-> Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

logicalAnd

Arguments

:: Tensor v'1 Bool

x

-> Tensor v'2 Bool

y

-> Tensor Build Bool

z

Returns the truth value of x AND y element-wise.

  • *NOTE*: LogicalAnd supports broadcasting. More about broadcasting here.

logicalAnd'

Arguments

:: OpParams 
-> Tensor v'1 Bool

x

-> Tensor v'2 Bool

y

-> Tensor Build Bool

z

logicalNot

Arguments

:: Tensor v'1 Bool

x

-> Tensor Build Bool

y

Returns the truth value of NOT x element-wise.

logicalOr

Arguments

:: Tensor v'1 Bool

x

-> Tensor v'2 Bool

y

-> Tensor Build Bool

z

Returns the truth value of x OR y element-wise.

  • *NOTE*: LogicalOr supports broadcasting. More about broadcasting here.

logicalOr'

Arguments

:: OpParams 
-> Tensor v'1 Bool

x

-> Tensor v'2 Bool

y

-> Tensor Build Bool

z

lookupTableExport

Arguments

:: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> m' (Tensor Value tkeys, Tensor Value tvalues)

(keys, values)

  • keys: Vector of all keys present in the table.
  • values: Tensor of all values in the table. Indexed in parallel with keys.

Outputs all keys and values in the table.

lookupTableExport'

Arguments

:: (MonadBuild m', TensorType tkeys, TensorType tvalues) 
=> OpParams 
-> Tensor Ref ByteString

table_handle: Handle to the table.

-> m' (Tensor Value tkeys, Tensor Value tvalues)

(keys, values)

  • keys: Vector of all keys present in the table.
  • values: Tensor of all values in the table. Indexed in parallel with keys.

lookupTableFind

Arguments

:: (MonadBuild m', TensorType tin, TensorType tout) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v'2 tin

keys: Any shape. Keys to look up.

-> Tensor v'3 tout

default_value

-> m' (Tensor Value tout)

values: Same shape as keys. Values found in the table, or default_values + for missing keys.

Looks up keys in a table, outputs the corresponding values.

The tensor keys must be of the same type as the keys of the table. The output values is of the type of the table values.

The scalar default_value is the value output for keys not present in the table. It must also be of the same type as the table values.

lookupTableFind'

Arguments

:: (MonadBuild m', TensorType tin, TensorType tout) 
=> OpParams 
-> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v'2 tin

keys: Any shape. Keys to look up.

-> Tensor v'3 tout

default_value

-> m' (Tensor Value tout)

values: Same shape as keys. Values found in the table, or default_values + for missing keys.

lookupTableImport

Arguments

:: (MonadBuild m', TensorType tin, TensorType tout) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v'2 tin

keys: Any shape. Keys to look up.

-> Tensor v'3 tout

values: Values to associate with keys.

-> m' ControlNode 

Replaces the contents of the table with the specified keys and values.

The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

lookupTableImport'

Arguments

:: (MonadBuild m', TensorType tin, TensorType tout) 
=> OpParams 
-> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v'2 tin

keys: Any shape. Keys to look up.

-> Tensor v'3 tout

values: Values to associate with keys.

-> m' ControlNode 

lookupTableInsert

Arguments

:: (MonadBuild m', TensorType tin, TensorType tout) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v'2 tin

keys: Any shape. Keys to look up.

-> Tensor v'3 tout

values: Values to associate with keys.

-> m' ControlNode 

Updates the table to associate keys with values.

The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

lookupTableInsert'

Arguments

:: (MonadBuild m', TensorType tin, TensorType tout) 
=> OpParams 
-> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v'2 tin

keys: Any shape. Keys to look up.

-> Tensor v'3 tout

values: Values to associate with keys.

-> m' ControlNode 

lookupTableSize

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> m' (Tensor Value Int64)

size: Scalar that contains number of elements in the table.

Computes the number of elements in the given table.

lookupTableSize'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

table_handle: Handle to the table.

-> m' (Tensor Value Int64)

size: Scalar that contains number of elements in the table.

loopCond

Arguments

:: Tensor v'1 Bool

input: A boolean scalar, representing the branch predicate of the Switch op.

-> Tensor Build Bool

output: The same tensor as input.

Forwards the input to the output.

This operator represents the loop termination condition used by the "pivot" switches of a loop.

loopCond'

Arguments

:: OpParams 
-> Tensor v'1 Bool

input: A boolean scalar, representing the branch predicate of the Switch op.

-> Tensor Build Bool

output: The same tensor as input.

matMul

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t 
=> Tensor v'1 t

a

-> Tensor v'2 t

b

-> Tensor Build t

product

Multiply the matrix "a" by the matrix "b".

The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transpose_b is true).

  • *Note*: The default kernel implementation for MatMul on GPUs uses cuBLAS.
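For instance, a minimal runnable sketch in the Haskell bindings (assuming the `tensorflow` package's TensorFlow.Core and TensorFlow.Ops modules, with matMul re-exported by TensorFlow.Ops):

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

main :: IO ()
main = do
  -- Multiply two 2x2 matrices; 'constant' takes row-major data.
  result <- TF.runSession $ do
    let a = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
        b = TF.constant (TF.Shape [2, 2]) [5, 6, 7, 8 :: Float]
    TF.run (a `TF.matMul` b)
  print (result :: V.Vector Float)  -- expected [19.0,22.0,43.0,50.0]
```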

matMul'

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

a

-> Tensor v'2 t

b

-> Tensor Build t

product

matchingFiles

Arguments

:: Tensor v'1 ByteString

pattern: A (scalar) shell wildcard pattern.

-> Tensor Build ByteString

filenames: A vector of matching filenames.

Returns the set of files matching a pattern.

Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion.

matchingFiles'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

pattern: A (scalar) shell wildcard pattern.

-> Tensor Build ByteString

filenames: A vector of matching filenames.

matrixBandPart

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Rank k tensor.

-> Tensor v'2 Int64

num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle.

-> Tensor v'3 Int64

num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle.

-> Tensor Build t

band: Rank k tensor of the same shape as input. The extracted banded tensor.

Copy a tensor setting everything outside a central band in each innermost matrix to zero.

The band part is computed as follows: Assume input has k dimensions `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where

`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.

The indicator function

`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper)`.

For example:

```prettyprint
# if 'input' is [[ 0,  1,  2, 3]
                 [-1,  0,  1, 2]
                 [-2, -1,  0, 1]
                 [-3, -2, -1, 0]],

tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
                                       [-1,  0,  1, 2]
                                       [ 0, -1,  0, 1]
                                       [ 0,  0, -1, 0]],

tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
                                      [-1,  0,  1, 0]
                                      [-2, -1,  0, 1]
                                      [ 0, -2, -1, 0]]
```

Useful special cases:

```prettyprint
tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
tf.matrix_band_part(input, 0, 0)  ==> Diagonal.
```
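The indicator and the band extraction are easy to restate in pure Haskell; this is an illustrative sketch of the rule above for a single matrix, not the op's kernel:

```
-- in_band indicator from the definition above.
inBand :: Int -> Int -> Int -> Int -> Bool
inBand numLower numUpper m n =
  (numLower < 0 || (m - n) <= numLower) && (numUpper < 0 || (n - m) <= numUpper)

-- Zero out everything outside the band of one (innermost) matrix.
bandPart :: Num a => Int -> Int -> [[a]] -> [[a]]
bandPart numLower numUpper rows =
  [ [ if inBand numLower numUpper m n then x else 0 | (n, x) <- zip [0 ..] row ]
  | (m, row) <- zip [0 ..] rows ]
```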

matrixBandPart'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Rank k tensor.

-> Tensor v'2 Int64

num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire + lower triangle.

-> Tensor v'3 Int64

num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep + entire upper triangle.

-> Tensor Build t

band: Rank k tensor of the same shape as input. The extracted banded tensor.

matrixDeterminant

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[...]`.

Computes the determinant of one or more square matrices.

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor containing the determinants for all input submatrices `[..., :, :]`.

matrixDeterminant'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[...]`.

matrixDiag

Arguments

:: TensorType t 
=> Tensor v'1 t

diagonal: Rank k, where `k >= 1`.

-> Tensor Build t

output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

Returns a batched diagonal tensor with given batched diagonal values.

Given a diagonal, this operation returns a tensor with the diagonal and everything else padded with zeros. The diagonal is computed as follows:

Assume diagonal has k dimensions `[I, J, K, ..., N]`, then the output is a tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:

`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.

For example:

```prettyprint
# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] and diagonal.shape = (2, 4)

tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
                               [0, 2, 0, 0]
                               [0, 0, 3, 0]
                               [0, 0, 0, 4]],
                              [[5, 0, 0, 0]
                               [0, 6, 0, 0]
                               [0, 0, 7, 0]
                               [0, 0, 0, 8]]]

which has shape (2, 4, 4)
```
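The innermost step is just embedding a vector as the diagonal of a square matrix, as in this pure-Haskell sketch (illustrative, not the op's kernel):

```
-- diagMatrix [1, 2, 3, 4] is the 4x4 matrix with that diagonal, zeros elsewhere.
diagMatrix :: Num a => [a] -> [[a]]
diagMatrix xs =
  [ [ if m == n then x else 0 | (n, _) <- indexed ] | (m, x) <- indexed ]
  where indexed = zip [0 :: Int ..] xs
```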

matrixDiag'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

diagonal: Rank k, where `k >= 1`.

-> Tensor Build t

output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

matrixDiagPart

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Rank k tensor where `k >= 2`.

-> Tensor Build t

diagonal: The extracted diagonal(s) having shape `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.

Returns the batched diagonal part of a batched tensor.

This operation returns a tensor with the diagonal part of the batched input. The diagonal part is computed as follows:

Assume input has k dimensions `[I, J, K, ..., M, N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:

`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.

The input must be at least a matrix.

For example:

```prettyprint
# 'input' is [[[1, 0, 0, 0]
               [0, 2, 0, 0]
               [0, 0, 3, 0]
               [0, 0, 0, 4]],
              [[5, 0, 0, 0]
               [0, 6, 0, 0]
               [0, 0, 7, 0]
               [0, 0, 0, 8]]]

and input.shape = (2, 4, 4)

tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]

which has shape (2, 4)
```

matrixDiagPart'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Rank k tensor where `k >= 2`.

-> Tensor Build t

diagonal: The extracted diagonal(s) having shape + `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.

matrixInverse

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[..., M, M]`.

Compatibility with numpy: equivalent to np.linalg.inv.

Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the inverse for all input submatrices `[..., :, :]`.

The op uses LU decomposition with partial pivoting to compute the inverses.

If a matrix is not invertible there is no guarantee what the op does. It may detect the condition and raise an exception or it may simply return a garbage result.

matrixInverse'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[..., M, M]`.

Compatibility with numpy: equivalent to np.linalg.inv.

matrixSetDiag

Arguments

:: TensorType t 
=> Tensor v'1 t

input: Rank `k+1`, where `k >= 1`.

-> Tensor v'2 t

diagonal: Rank k, where `k >= 1`.

-> Tensor Build t

output: Rank `k+1`, with `output.shape = input.shape`.

Returns a batched matrix tensor with new batched diagonal values.

Given input and diagonal, this operation returns a tensor with the same shape and values as input, except for the main diagonal of the innermost matrices. These will be overwritten by the values in diagonal.

The output is computed as follows:

Assume input has `k+1` dimensions `[I, J, K, ..., M, N]` and diagonal has k dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:

  • `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
  • `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.

matrixSetDiag'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: Rank `k+1`, where `k >= 1`.

-> Tensor v'2 t

diagonal: Rank k, where `k >= 1`.

-> Tensor Build t

output: Rank `k+1`, with `output.shape = input.shape`.

matrixSolve

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> Tensor v'1 t

matrix: Shape is `[..., M, M]`.

-> Tensor v'2 t

rhs: Shape is `[..., M, K]`.

-> Tensor Build t

output: Shape is `[..., M, K]`.

Solves systems of linear equations.

Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. Rhs is a tensor of shape `[..., M, K]`. The output is a tensor of shape `[..., M, K]`. If adjoint is False then each output matrix satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If adjoint is True then each output matrix satisfies `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
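A small runnable sketch solving one 2x2 system (assuming matrixSolve from TensorFlow.GenOps.Core together with TensorFlow.Core and TensorFlow.Ops):

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFG
import qualified TensorFlow.Ops as TF

main :: IO ()
main = do
  result <- TF.runSession $ do
    -- Solve [[2, 0], [0, 4]] * x = [[2], [8]].
    let m   = TF.constant (TF.Shape [2, 2]) [2, 0, 0, 4 :: Float]
        rhs = TF.constant (TF.Shape [2, 1]) [2, 8 :: Float]
    TF.run (TFG.matrixSolve m rhs)
  print (result :: V.Vector Float)  -- expected [1.0,2.0]
```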

matrixSolve'

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

matrix: Shape is `[..., M, M]`.

-> Tensor v'2 t

rhs: Shape is `[..., M, K]`.

-> Tensor Build t

output: Shape is `[..., M, K]`.

matrixSolveLs

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

matrix: Shape is `[..., M, N]`.

-> Tensor v'2 t

rhs: Shape is `[..., M, K]`.

-> Tensor v'3 Double

l2_regularizer: Scalar tensor.

Compatibility with numpy: equivalent to np.linalg.lstsq.

-> Tensor Build t

output: Shape is `[..., N, K]`.

Solves one or more linear least-squares problems.

matrix is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`. The output is a tensor of shape `[..., N, K]` where each output matrix solves each of the equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares sense.

Below we use the following notation for each pair of matrix and right-hand sides in the batch:

matrix=\(A \in \mathbb{R}^{m \times n}\), rhs=\(B \in \mathbb{R}^{m \times k}\), output=\(X \in \mathbb{R}^{n \times k}\), l2_regularizer=\(\lambda\).

If fast is True, then the solution is computed by solving the normal equations using Cholesky decomposition. Specifically, if \(m \ge n\) then \(X = (A^T A + \lambda I)^{-1} A^T B\), which solves the least-squares problem \(X = \mathrm{argmin}_{Z \in \mathbb{R}^{n \times k}} \|A Z - B\|_F^2 + \lambda \|Z\|_F^2\). If \(m < n\) then output is computed as \(X = A^T (A A^T + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the minimum-norm solution to the under-determined linear system, i.e. \(X = \mathrm{argmin}_{Z \in \mathbb{R}^{n \times k}} \|Z\|_F^2\) subject to \(A Z = B\). Notice that the fast path is only numerically stable when \(A\) is numerically full rank and has a condition number \(\mathrm{cond}(A) < \frac{1}{\sqrt{\epsilon_{mach}}}\), or \(\lambda\) is sufficiently large.

If fast is False an algorithm based on the numerically robust complete orthogonal decomposition is used. This computes the minimum-norm least-squares solution, even when \(A\) is rank deficient. This path is typically 6-7 times slower than the fast path. If fast is False then l2_regularizer is ignored.

matrixSolveLs'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

matrix: Shape is `[..., M, N]`.

-> Tensor v'2 t

rhs: Shape is `[..., M, K]`.

-> Tensor v'3 Double

l2_regularizer: Scalar tensor.

Compatibility with numpy: equivalent to np.linalg.lstsq.

-> Tensor Build t

output: Shape is `[..., N, K]`.

matrixTriangularSolve

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

matrix: Shape is `[..., M, M]`.

-> Tensor v'2 t

rhs: Shape is `[..., M, K]`.

-> Tensor Build t

output: Shape is `[..., M, K]`.

Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.

matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. If lower is True then the strictly upper triangular part of each inner-most matrix is assumed to be zero and not accessed. If lower is False then the strictly lower triangular part of each inner-most matrix is assumed to be zero and not accessed. rhs is a tensor of shape `[..., M, K]`.

The output is a tensor of shape `[..., M, K]`. If adjoint is False then the innermost matrices in output satisfy matrix equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If adjoint is True then the innermost matrices in output satisfy matrix equations `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.

matrixTriangularSolve'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

matrix: Shape is `[..., M, M]`.

-> Tensor v'2 t

rhs: Shape is `[..., M, K]`.

-> Tensor Build t

output: Shape is `[..., M, K]`.

max

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

Computes the maximum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
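A small runnable sketch (assuming max from TensorFlow.GenOps.Core and vector/constant from TensorFlow.Ops), reducing a 2x3 tensor across its columns:

```
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFG
import qualified TensorFlow.Ops as TF

main :: IO ()
main = do
  result <- TF.runSession $ do
    let t    = TF.constant (TF.Shape [2, 3]) [1, 5, 3, 4, 2, 6 :: Float]
        dims = TF.vector [1 :: Int32]  -- reduce along dimension 1 (columns)
    TF.run (TFG.max t dims)
  print (result :: V.Vector Float)  -- expected [5.0,6.0]
```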

max'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

maxPool

Arguments

:: OneOf `[Word16, Float]` t 
=> Tensor v'1 t

input: 4-D input to pool over.

-> Tensor Build t

output: The max pooled output tensor.

Performs max pooling on the input.

maxPool'

Arguments

:: OneOf `[Word16, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D input to pool over.

-> Tensor Build t

output: The max pooled output tensor.

maxPool3D

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

-> Tensor Build t

output: The max pooled output tensor.

Performs 3D max pooling on the input.

maxPool3D'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

-> Tensor Build t

output: The max pooled output tensor.

maxPool3DGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Float

orig_input: The original input tensor.

-> Tensor v'2 Float

orig_output: The original output tensor.

-> Tensor v'3 t

grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

-> Tensor Build t

output

Computes gradients of max pooling function.

maxPool3DGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Float

orig_input: The original input tensor.

-> Tensor v'2 Float

orig_output: The original output tensor.

-> Tensor v'3 t

grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.

-> Tensor Build t

output

maxPoolGrad

Arguments

:: OneOf `[Word16, Float]` t 
=> Tensor v'1 t

orig_input: The original input tensor.

-> Tensor v'2 t

orig_output: The original output tensor.

-> Tensor v'3 t

grad: 4-D. Gradients w.r.t. the output of max_pool.

-> Tensor Build t

output: Gradients w.r.t. the input to max_pool.

Computes gradients of the maxpooling function.

maxPoolGrad'

Arguments

:: OneOf `[Word16, Float]` t 
=> OpParams 
-> Tensor v'1 t

orig_input: The original input tensor.

-> Tensor v'2 t

orig_output: The original output tensor.

-> Tensor v'3 t

grad: 4-D. Gradients w.r.t. the output of max_pool.

-> Tensor Build t

output: Gradients w.r.t. the input to max_pool.

maxPoolGradWithArgmax

Arguments

:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
=> Tensor v'1 t

input: The original input.

-> Tensor v'2 t

grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of max_pool.

-> Tensor v'3 targmax

argmax: The indices of the maximum values chosen for each output of max_pool.

-> Tensor Build t

output: Gradients w.r.t. the input of max_pool.

Computes gradients of the maxpooling function.

maxPoolGradWithArgmax'

Arguments

:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
=> OpParams 
-> Tensor v'1 t

input: The original input.

-> Tensor v'2 t

grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + output of max_pool.

-> Tensor v'3 targmax

argmax: The indices of the maximum values chosen for each output of max_pool.

-> Tensor Build t

output: Gradients w.r.t. the input of max_pool.

maxPoolWithArgmax

Arguments

:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
=> Tensor v'1 t

input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

-> (Tensor Build t, Tensor Build targmax)

(output, argmax)

  • output: The max pooled output tensor.
  • argmax: 4-D. The flattened indices of the max values chosen for each output.

Performs max pooling on the input and outputs both max values and indices.

The indices in argmax are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index `((b * height + y) * width + x) * channels + c`.
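The flattening is ordinary row-major index arithmetic, restated here in pure Haskell for clarity:

```
-- Flattened index of position (b, y, x, c) in a [batch, height, width, channels] tensor.
flattenIndex :: Int -> Int -> Int -> (Int, Int, Int, Int) -> Int
flattenIndex height width channels (b, y, x, c) =
  ((b * height + y) * width + x) * channels + c

-- e.g. flattenIndex 4 4 3 (1, 2, 3, 0) == 81
```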

maxPoolWithArgmax'

Arguments

:: (OneOf `[Int32, Int64]` targmax, OneOf `[Word16, Float]` t) 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

-> (Tensor Build t, Tensor Build targmax)

(output, argmax)

  • output: The max pooled output tensor.
  • argmax: 4-D. The flattened indices of the max values chosen for each output.

maximum

Arguments

:: OneOf `[Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns the max of x and y (i.e. x > y ? x : y) element-wise.

  • *NOTE*: Maximum supports broadcasting. More about broadcasting here.

maximum'

Arguments

:: OneOf `[Int32, Int64, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

mean

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

Computes the mean of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

mean'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

merge

Arguments

:: TensorType t 
=> [Tensor v'1 t]

inputs: The input tensors, exactly one of which will become available.

-> (Tensor Build t, Tensor Build Int32)

(output, value_index)

  • output: Will be set to the available input tensor.
  • value_index: The index of the chosen input tensor in inputs.

Forwards the value of an available tensor from inputs to output.

Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.

Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.

merge'

Arguments

:: TensorType t 
=> OpParams 
-> [Tensor v'1 t]

inputs: The input tensors, exactly one of which will become available.

-> (Tensor Build t, Tensor Build Int32)

(output, value_index)

  • output: Will be set to the available input tensor.
  • value_index: The index of the chosen input tensor in inputs.

mergeSummary

Arguments

:: [Tensor v'1 ByteString]

inputs: Can be of any shape. Each must contain serialized Summary protocol buffers.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

Merges summaries.

This op creates a `Summary` protocol buffer that contains the union of all the values in the input summaries.

When the Op is run, it reports an InvalidArgument error if multiple values + in the summaries to merge use the same tag.

mergeSummary'

Arguments

:: OpParams 
-> [Tensor v'1 ByteString]

inputs: Can be of any shape. Each must contain serialized Summary protocol buffers.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

mergeV2Checkpoints

Arguments

:: MonadBuild m' 
=> Tensor v'1 ByteString

checkpoint_prefixes: prefixes of V2 checkpoints to merge.

-> Tensor v'2 ByteString

destination_prefix: scalar. The desired final prefix. Allowed to be the same as one of the checkpoint_prefixes.

-> m' ControlNode 

V2 format specific: merges the metadata files of sharded checkpoints. The result is one logical checkpoint, with one physical metadata file and renamed data files.

Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.

If delete_old_dirs is true, attempts to delete recursively the dirname of each path in the input checkpoint_prefixes. This is useful when those paths are non-user-facing temporary locations.

mergeV2Checkpoints'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor v'1 ByteString

checkpoint_prefixes: prefixes of V2 checkpoints to merge.

-> Tensor v'2 ByteString

destination_prefix: scalar. The desired final prefix. Allowed to be the same as one of the checkpoint_prefixes.

-> m' ControlNode 

min

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

Computes the minimum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

min'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

minimum

Arguments

:: OneOf `[Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns the min of x and y (i.e. x < y ? x : y) element-wise.

  • *NOTE*: Minimum supports broadcasting. More about broadcasting here.

minimum'

Arguments

:: OneOf `[Int32, Int64, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

mirrorPad

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> Tensor v'1 t

input: The input tensor to be padded.

-> Tensor v'2 tpaddings

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

-> Tensor Build t

output: The padded tensor.

Pads a tensor with mirrored values.

This operation pads an input with mirrored values according to the paddings you specify. paddings is an integer tensor with shape `[n, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many values to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of input in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if copy_border is true (if false, respectively).

The padded size of each dimension D of the output is:

`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

For example:

```prettyprint
# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1], [2, 2]].
# 'mode' is SYMMETRIC.
# rank of 't' is 2.
pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
                      [2, 1, 1, 2, 3, 3, 2]
                      [5, 4, 4, 5, 6, 6, 5]
                      [5, 4, 4, 5, 6, 6, 5]]
```
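Along a single dimension the SYMMETRIC rule (with the border copied) amounts to the following pure-Haskell sketch, which reproduces the first row of the example above:

```
-- mirrorPad1D 2 2 [1, 2, 3] == [2, 1, 1, 2, 3, 3, 2]
mirrorPad1D :: Int -> Int -> [a] -> [a]
mirrorPad1D before after xs =
  reverse (take before xs) ++ xs ++ take after (reverse xs)
```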

mirrorPad'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> OpParams 
-> Tensor v'1 t

input: The input tensor to be padded.

-> Tensor v'2 tpaddings

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

-> Tensor Build t

output: The padded tensor.

mirrorPadGrad

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> Tensor v'1 t

input: The input tensor to be folded.

-> Tensor v'2 tpaddings

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

-> Tensor Build t

output: The folded tensor.

Gradient op for MirrorPad op. This op folds a mirror-padded tensor.

This operation folds the padded areas of input by MirrorPad according to the paddings you specify. paddings must be the same as the paddings argument given to the corresponding MirrorPad op.

The folded size of each dimension D of the output is:

`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`

For example:

```prettyprint
# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# 'paddings' is [[0, 1], [0, 1]].
# 'mode' is SYMMETRIC.
# rank of 't' is 2.
pad(t, paddings) ==> [[ 1,  5]
                      [11, 28]]
```

mirrorPadGrad'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> OpParams 
-> Tensor v'1 t

input: The input tensor to be folded.

-> Tensor v'2 tpaddings

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

-> Tensor Build t

output: The folded tensor.

mod

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns element-wise remainder of division.

  • *NOTE*: Mod supports broadcasting. More about broadcasting here.

mod'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

mul

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns x * y element-wise.

  • *NOTE*: Mul supports broadcasting. More about broadcasting here.

mul'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

multinomial

Arguments

:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v'1 t

logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log probabilities for all classes.

-> Tensor v'2 Int32

num_samples: 0-D. Number of independent samples to draw for each row slice.

-> m' (Tensor Value Int64)

output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` contains the drawn class labels with range `[0, num_classes)`.

Draws samples from a multinomial distribution.

multinomial'

Arguments

:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor v'1 t

logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + represents the unnormalized log probabilities for all classes.

-> Tensor v'2 Int32

num_samples: 0-D. Number of independent samples to draw for each row slice.

-> m' (Tensor Value Int64)

output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` + contains the drawn class labels with range `[0, num_classes)`.

mutableDenseHashTable

Arguments

:: (MonadBuild m', TensorType key_dtype) 
=> DataType

value_dtype: Type of the table values.

-> Tensor v'1 key_dtype

empty_key: The key used to represent empty key buckets internally. Must not be used in insert or lookup operations.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

Creates an empty hash table that uses tensors as the backing store. It uses "open addressing" with quadratic reprobing to resolve collisions.

This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation.

mutableDenseHashTable'

Arguments

:: (MonadBuild m', TensorType key_dtype) 
=> OpParams 
-> DataType

value_dtype: Type of the table values.

-> Tensor v'1 key_dtype

empty_key: The key used to represent empty key buckets internally. Must not + be used in insert or lookup operations.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

mutableHashTable

Arguments

:: MonadBuild m' 
=> DataType

key_dtype: Type of the table keys.

-> DataType

value_dtype: Type of the table values.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

Creates an empty hash table.

This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation.

mutableHashTable'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

key_dtype: Type of the table keys.

-> DataType

value_dtype: Type of the table values.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

mutableHashTableOfTensors

Arguments

:: MonadBuild m' 
=> DataType

key_dtype: Type of the table keys.

-> DataType

value_dtype: Type of the table values.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

Creates an empty hash table.

This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a vector. Data can be inserted into the table using the insert operations. It does not support the initialization operation.

mutableHashTableOfTensors'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

key_dtype: Type of the table keys.

-> DataType

value_dtype: Type of the table values.

-> m' (Tensor Ref ByteString)

table_handle: Handle to a table.

neg

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes numerical negative value element-wise.

I.e., \(y = -x\).

negTrain

Arguments

:: MonadBuild m' 
=> Int64

num_negative_samples: Number of negative samples per example.

-> Tensor Ref Float

w_in: input word embedding.

-> Tensor Ref Float

w_out: output word embedding.

-> Tensor v'3 Int32

examples: A vector of word ids.

-> Tensor v'4 Int32

labels: A vector of word ids.

-> Tensor v'5 Float

lr

-> m' ControlNode 

Training via negative sampling.

negTrain'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Int64

num_negative_samples: Number of negative samples per example.

-> Tensor Ref Float

w_in: input word embedding.

-> Tensor Ref Float

w_out: output word embedding.

-> Tensor v'3 Int32

examples: A vector of word ids.

-> Tensor v'4 Int32

labels: A vector of word ids.

-> Tensor v'5 Float

lr

-> m' ControlNode 

nextIteration

Arguments

:: TensorType t 
=> Tensor v'1 t

data: The tensor to be made available to the next iteration.

-> Tensor Build t

output: The same tensor as `data`.

Makes its input available to the next iteration.

nextIteration'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

data: The tensor to be made available to the next iteration.

-> Tensor Build t

output: The same tensor as `data`.

noOp :: forall m'. MonadBuild m' => m' ControlNode

Does nothing. Only useful as a placeholder for control edges.

noOp' :: forall m'. MonadBuild m' => OpParams -> m' ControlNode

nonMaxSuppression

Arguments

:: Tensor v'1 Float

boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

-> Tensor v'2 Float

scores: A 1-D float tensor of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes).

-> Tensor v'3 Int32

max_output_size: A scalar integer tensor representing the maximum number of boxes to be selected by non max suppression.

-> Tensor Build Int32

selected_indices: A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.

Greedily selects a subset of bounding boxes in descending order of score, pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners, and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system, and is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm.

The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:

```
selected_indices = tf.image.non_max_suppression(
    boxes, scores, max_output_size, iou_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
```
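The greedy selection rule itself can be sketched in a few lines of pure Haskell (illustrative only, not the TF kernel; boxes use the [y1, x1, y2, x2] layout described above):

```
import Data.List (sortOn)

type Box = (Float, Float, Float, Float)  -- (y1, x1, y2, x2)

area :: Box -> Float
area (y1, x1, y2, x2) = max 0 (y2 - y1) * max 0 (x2 - x1)

-- Intersection-over-union of two boxes.
iou :: Box -> Box -> Float
iou a@(ay1, ax1, ay2, ax2) b@(by1, bx1, by2, bx2) =
  let ih    = max 0 (min ay2 by2 - max ay1 by1)
      iw    = max 0 (min ax2 bx2 - max ax1 bx1)
      inter = ih * iw
      uni   = area a + area b - inter
  in if uni <= 0 then 0 else inter / uni

-- Visit boxes in descending score order; keep a box unless it overlaps
-- an already kept box by more than the IOU threshold.
nonMaxSuppression :: Float -> Int -> [(Box, Float)] -> [Int]
nonMaxSuppression iouThreshold maxOut boxes =
    go (sortOn (negate . snd . snd) (zip [0 ..] boxes)) []
  where
    go [] kept = reverse (map fst kept)
    go _ kept | length kept >= maxOut = reverse (map fst kept)
    go ((i, (b, _)) : rest) kept
      | any (\(_, kb) -> iou b kb > iouThreshold) kept = go rest kept
      | otherwise = go rest ((i, b) : kept)
```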

nonMaxSuppression'

Arguments

:: OpParams 
-> Tensor v'1 Float

boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

-> Tensor v'2 Float

scores: A 1-D float tensor of shape `[num_boxes]` representing a single + score corresponding to each box (each row of boxes).

-> Tensor v'3 Int32

max_output_size: A scalar integer tensor representing the maximum number of + boxes to be selected by non max suppression.

-> Tensor Build Int32

selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + indices from the boxes tensor, where `M <= max_output_size`.

notEqual

Returns the truth value of (x != y) element-wise.

  • *NOTE*: NotEqual supports broadcasting. More about broadcasting here.

oneHot

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Word8]` tI) 
=> Tensor v'1 tI

indices: A tensor of indices.

-> Tensor v'2 Int32

depth: A scalar defining the depth of the one hot dimension.

-> Tensor v'3 t

on_value: A scalar defining the value to fill in output when `indices[j] = i`.

-> Tensor v'4 t

off_value: A scalar defining the value to fill in output when `indices[j] != i`.

-> Tensor Build t

output: The one-hot tensor.

Returns a one-hot tensor.

The locations represented by indices in indices take value on_value, while all other locations take value off_value.

If the input indices is rank N, the output will have rank `N+1`. The new axis is created at dimension axis (default: the new axis is appended at the end).

If indices is a scalar the output shape will be a vector of length depth.

If indices is a vector of length features, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```

If indices is a matrix (batch) with shape `[batch, features]`, the output shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```

Examples
=========

Suppose that

```
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```

Then output is `[4 x 3]`:

```
output =
[5.0 0.0 0.0]  // one_hot(0)
[0.0 0.0 5.0]  // one_hot(2)
[0.0 0.0 0.0]  // one_hot(-1)
[0.0 5.0 0.0]  // one_hot(1)
```

Suppose that

```
indices = [0, 2, -1, 1]
depth = 3
on_value = 0.0
off_value = 3.0
axis = 0
```

Then output is `[3 x 4]`:

```
output =
[0.0 3.0 3.0 3.0]
[3.0 3.0 3.0 0.0]
[3.0 0.0 3.0 3.0]
//  ^            one_hot(0)
//      ^        one_hot(2)
//          ^    one_hot(-1)
//              ^ one_hot(1)
```

Suppose that

```
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```

Then output is `[2 x 2 x 3]`:

```
output =
[
  [1.0, 0.0, 0.0]  // one_hot(0)
  [0.0, 0.0, 1.0]  // one_hot(2)
][
  [0.0, 1.0, 0.0]  // one_hot(1)
  [0.0, 0.0, 0.0]  // one_hot(-1)
]
```
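For the default `axis == -1` case the rule is a short list comprehension in pure Haskell (illustrative sketch, not the op's kernel):

```
-- oneHotRows 3 5.0 0.0 [0, 2, -1, 1] reproduces the first example above.
oneHotRows :: Int -> a -> a -> [Int] -> [[a]]
oneHotRows depth onValue offValue indices =
  [ [ if j == i then onValue else offValue | j <- [0 .. depth - 1] ]
  | i <- indices ]
```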

oneHot'

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Word8]` tI) 
=> OpParams 
-> Tensor v'1 tI

indices: A tensor of indices.

-> Tensor v'2 Int32

depth: A scalar defining the depth of the one hot dimension.

-> Tensor v'3 t

on_value: A scalar defining the value to fill in output when `indices[j] = i`.

-> Tensor v'4 t

off_value: A scalar defining the value to fill in output when `indices[j] != i`.

-> Tensor Build t

output: The one-hot tensor.

pack

Arguments

:: TensorType t 
=> [Tensor v'1 t]

values: Must be of same shape and type.

-> Tensor Build t

output: The packed tensor.

Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

Packs the N tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of tensors of shape `(A, B, C)`:

if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`; if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`; etc.

For example:

```prettyprint
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```

This is the opposite of unpack.
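A small runnable sketch of the first example (assuming pack from TensorFlow.GenOps.Core and vector from TensorFlow.Ops):

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFG
import qualified TensorFlow.Ops as TF

main :: IO ()
main = do
  -- Pack three length-2 vectors along a new first dimension: shape (3, 2).
  result <- TF.runSession $
    TF.run (TFG.pack [TF.vector [1, 4 :: Float], TF.vector [2, 5], TF.vector [3, 6]])
  print (result :: V.Vector Float)  -- row-major: [1.0,4.0,2.0,5.0,3.0,6.0]
```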

pack'

Arguments

:: TensorType t 
=> OpParams 
-> [Tensor v'1 t]

values: Must be of same shape and type.

-> Tensor Build t

output: The packed tensor.

pad

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> Tensor v'1 t

input

-> Tensor v'2 tpaddings

paddings

-> Tensor Build t

output

Pads a tensor with zeros.

This operation pads an input with zeros according to the paddings you specify. paddings is an integer tensor with shape `[Dn, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many zeros to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many zeros to add after the contents of input in that dimension.

The padded size of each dimension D of the output is:

`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

For example:

```prettyprint
# 't' is [[1, 1], [2, 2]]
# 'paddings' is [[1, 1], [2, 2]]
# rank of 't' is 2
pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                      [0, 0, 1, 1, 0, 0]
                      [0, 0, 2, 2, 0, 0]
                      [0, 0, 0, 0, 0, 0]]
```
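A runnable sketch of exactly this example (assuming pad from TensorFlow.GenOps.Core and constant from TensorFlow.Ops):

```
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFG
import qualified TensorFlow.Ops as TF

main :: IO ()
main = do
  result <- TF.runSession $ do
    let t        = TF.constant (TF.Shape [2, 2]) [1, 1, 2, 2 :: Float]
        paddings = TF.constant (TF.Shape [2, 2]) [1, 1, 2, 2 :: Int32]
    TF.run (TFG.pad t paddings)
  print (result :: V.Vector Float)  -- the 4x6 matrix above, row-major
```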

pad'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 tpaddings

paddings

-> Tensor Build t

output

paddingFIFOQueue

Arguments

:: MonadBuild m' 
=> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that produces elements in first-in first-out order.

Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum size of any given element in the minibatch. See below for details.

paddingFIFOQueue'

Arguments

:: MonadBuild m' 
=> OpParams 
-> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the queue.

paddingFIFOQueueV2

Arguments

:: MonadBuild m' 
=> [DataType]

component_types: The type of each component in a value.

-> m' ResourceHandle

handle: The handle to the queue.

A queue that produces elements in first-in first-out order.

Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum size of any given element in the minibatch. See below for details.

paddingFIFOQueueV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> [DataType]

component_types: The type of each component in a value.

-> m' ResourceHandle

handle: The handle to the queue.

parallelConcat

Arguments

:: TensorType t 
=> Shape

shape: the final shape of the result; should be equal to the shapes of any input but with the number of input values in the first dimension.

-> [Tensor v'1 t]

values: Tensors to be concatenated. All must have size 1 in the first dimension and the same shape.

-> Tensor Build t

output: The concatenated tensor.

Concatenates a list of N tensors along the first dimension.

The input tensors are all required to have size 1 in the first dimension.

For example:

```prettyprint
# 'x' is [[1, 4]]
# 'y' is [[2, 5]]
# 'z' is [[3, 6]]
parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
```

The difference between concat and parallel_concat is that concat requires all of the inputs be computed before the operation will begin, but doesn't require that the input shapes be known during graph construction. Parallel concat will copy pieces of the input into the output as they become available; in some situations this can provide a performance benefit.

parallelConcat'

Arguments

:: TensorType t 
=> OpParams 
-> Shape

shape: the final shape of the result; should be equal to the shapes of any input + but with the number of input values in the first dimension.

-> [Tensor v'1 t]

values: Tensors to be concatenated. All must have size 1 in the first dimension + and same shape.

-> Tensor Build t

output: The concatenated tensor.

parameterizedTruncatedNormal

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> Tensor v'1 t

shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

-> Tensor v'2 dtype

means: The mean parameter of each batch.

-> Tensor v'3 dtype

stdevs: The standard deviation parameter of each batch. Must be greater than 0.

-> Tensor v'4 dtype

minvals: The minimum cutoff. May be -infinity.

-> Tensor v'5 dtype

maxvals: The maximum cutoff. May be +infinity, and must be more than the minval for each batch.

-> m' (Tensor Value dtype)

output: A matrix of shape num_batches x samples_per_batch, filled with random truncated normal values using the parameters for each row.

Outputs random values from a normal distribution. The parameters may each be a scalar which applies to the entire output, or a vector of length shape[0] which stores the parameters for each batch.

parameterizedTruncatedNormal'

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> OpParams 
-> Tensor v'1 t

shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

-> Tensor v'2 dtype

means: The mean parameter of each batch.

-> Tensor v'3 dtype

stdevs: The standard deviation parameter of each batch. Must be greater than 0.

-> Tensor v'4 dtype

minvals: The minimum cutoff. May be -infinity.

-> Tensor v'5 dtype

maxvals: The maximum cutoff. May be +infinity, and must be more than the minval + for each batch.

-> m' (Tensor Value dtype)

output: A matrix of shape num_batches x samples_per_batch, filled with random + truncated normal values using the parameters for each row.

parseExample

Arguments

:: (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) 
=> Tensor v'1 ByteString

serialized: A vector containing a batch of binary serialized Example protos.

-> Tensor v'2 ByteString

names: A vector containing the names of the serialized protos. May contain, for example, table key (descriptive) names for the corresponding serialized protos. These are purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no names are available. If non-empty, this vector must be the same length as "serialized".

-> [Tensor v'3 ByteString]

sparse_keys: A list of Nsparse string Tensors (scalars). The keys expected in the Examples' features associated with sparse values.

-> [Tensor v'4 ByteString]

dense_keys: A list of Ndense string Tensors (scalars). The keys expected in the Examples' features associated with dense values.

-> TensorList v'5 tdense

dense_defaults: A list of Ndense Tensors (some may be empty). dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. If dense_defaults[j] is not empty, its shape must match dense_shapes[j].

-> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)

(sparse_indices, sparse_values, sparse_shapes, dense_values)

  • sparse_indices
  • sparse_values
  • sparse_shapes
  • dense_values

Transforms a vector of brain.Example protos (as strings) into typed tensors.

parseExample'

Arguments

:: (OneOfs `[ByteString, Int64, Float]` sparse_types, OneOfs `[ByteString, Int64, Float]` tdense) 
=> OpParams 
-> Tensor v'1 ByteString

serialized: A vector containing a batch of binary serialized Example protos.

-> Tensor v'2 ByteString

names: A vector containing the names of the serialized protos. + May contain, for example, table key (descriptive) names for the + corresponding serialized protos. These are purely useful for debugging + purposes, and the presence of values here has no effect on the output. + May also be an empty vector if no names are available. + If non-empty, this vector must be the same length as "serialized".

-> [Tensor v'3 ByteString]

sparse_keys: A list of Nsparse string Tensors (scalars). + The keys expected in the Examples' features associated with sparse values.

-> [Tensor v'4 ByteString]

dense_keys: A list of Ndense string Tensors (scalars). + The keys expected in the Examples' features associated with dense values.

-> TensorList v'5 tdense

dense_defaults: A list of Ndense Tensors (some may be empty). + dense_defaults[j] provides default values + when the example's feature_map lacks dense_key[j]. If an empty Tensor is + provided for dense_defaults[j], then the Feature dense_keys[j] is required. + The input type is inferred from dense_defaults[j], even when it's empty. + If dense_defaults[j] is not empty, its shape must match dense_shapes[j].

-> ([Tensor Build Int64], TensorList Build sparse_types, [Tensor Build Int64], TensorList Build tdense)

(sparse_indices, sparse_values, sparse_shapes, dense_values)

  • sparse_indices
  • sparse_values
  • sparse_shapes
  • dense_values

parseSingleSequenceExample

Arguments

:: (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) 
=> Tensor v'1 ByteString

serialized: A scalar containing a binary serialized SequenceExample proto.

-> Tensor v'2 ByteString

feature_list_dense_missing_assumed_empty: A vector listing the FeatureList keys which may be missing from the SequenceExample. If the associated FeatureList is missing, it is treated as empty. By default, any FeatureList not listed in this vector must exist in the SequenceExample.

-> [Tensor v'3 ByteString]

context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). The keys expected in the Examples' features associated with context_sparse values.

-> [Tensor v'4 ByteString]

context_dense_keys: A list of Ncontext_dense string Tensors (scalars). The keys expected in the SequenceExamples' context features associated with dense values.

-> [Tensor v'5 ByteString]

feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors (scalars). The keys expected in the FeatureLists associated with sparse values.

-> [Tensor v'6 ByteString]

feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). The keys expected in the SequenceExamples' feature_lists associated with lists of dense values.

-> TensorList v'7 tcontext_dense

context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match context_dense_shapes[j].

-> Tensor v'8 ByteString

debug_name: A scalar containing the name of the serialized proto. May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty scalar if no name is available.

-> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)

(context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)

  • context_sparse_indices
  • context_sparse_values
  • context_sparse_shapes
  • context_dense_values
  • feature_list_sparse_indices
  • feature_list_sparse_values
  • feature_list_sparse_shapes
  • feature_list_dense_values

Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.

parseSingleSequenceExample'

Arguments

:: (OneOfs `[ByteString, Int64, Float]` context_sparse_types, OneOfs `[ByteString, Int64, Float]` tcontext_dense, OneOfs `[ByteString, Int64, Float]` feature_list_dense_types, OneOfs `[ByteString, Int64, Float]` feature_list_sparse_types) 
=> OpParams 
-> Tensor v'1 ByteString

serialized: A scalar containing a binary serialized SequenceExample proto.

-> Tensor v'2 ByteString

feature_list_dense_missing_assumed_empty: A vector listing the + FeatureList keys which may be missing from the SequenceExample. If the + associated FeatureList is missing, it is treated as empty. By default, + any FeatureList not listed in this vector must exist in the SequenceExample.

-> [Tensor v'3 ByteString]

context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). + The keys expected in the Examples' features associated with context_sparse + values.

-> [Tensor v'4 ByteString]

context_dense_keys: A list of Ncontext_dense string Tensors (scalars). + The keys expected in the SequenceExamples' context features associated with + dense values.

-> [Tensor v'5 ByteString]

feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors + (scalars). The keys expected in the FeatureLists associated with sparse + values.

-> [Tensor v'6 ByteString]

feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). + The keys expected in the SequenceExamples' feature_lists associated + with lists of dense values.

-> TensorList v'7 tcontext_dense

context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). + context_dense_defaults[j] provides default values + when the SequenceExample's context map lacks context_dense_key[j]. + If an empty Tensor is provided for context_dense_defaults[j], + then the Feature context_dense_keys[j] is required. + The input type is inferred from context_dense_defaults[j], even when it's + empty. If context_dense_defaults[j] is not empty, its shape must match + context_dense_shapes[j].

-> Tensor v'8 ByteString

debug_name: A scalar containing the name of the serialized proto. + May contain, for example, table key (descriptive) name for the + corresponding serialized proto. This is purely useful for debugging + purposes, and the presence of values here has no effect on the output. + May also be an empty scalar if no name is available.

-> ([Tensor Build Int64], TensorList Build context_sparse_types, [Tensor Build Int64], TensorList Build tcontext_dense, [Tensor Build Int64], TensorList Build feature_list_sparse_types, [Tensor Build Int64], TensorList Build feature_list_dense_types)

(context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values)

  • context_sparse_indices
  • context_sparse_values
  • context_sparse_shapes
  • context_dense_values
  • feature_list_sparse_indices
  • feature_list_sparse_values
  • feature_list_sparse_shapes
  • feature_list_dense_values

parseTensor

Arguments

:: TensorType out_type 
=> Tensor v'1 ByteString

serialized: A scalar string containing a serialized TensorProto proto.

-> Tensor Build out_type

output: A Tensor of type out_type.

Transforms a serialized tensorflow.TensorProto proto into a Tensor.

parseTensor'

Arguments

:: TensorType out_type 
=> OpParams 
-> Tensor v'1 ByteString

serialized: A scalar string containing a serialized TensorProto proto.

-> Tensor Build out_type

output: A Tensor of type out_type.

placeholder

Arguments

:: TensorType dtype 
=> Tensor Build dtype

output: A placeholder tensor that must be replaced using the feed mechanism.

A placeholder op for a value that will be fed into the computation.

N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime.
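A minimal sketch of the feed mechanism (assuming placeholder and constant from TensorFlow.Ops, mul from TensorFlow.GenOps.Core, and runWithFeeds, feed, and encodeTensorData from TensorFlow.Core):

```
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFG
import qualified TensorFlow.Ops as TF

main :: IO ()
main = TF.runSession $ do
  -- The placeholder has no value of its own; it must be fed at run time.
  p <- TF.placeholder (TF.Shape [2])
  let doubled = TFG.mul p (TF.constant (TF.Shape [2]) [2, 2 :: Float])
      feedVal = TF.encodeTensorData (TF.Shape [2]) (V.fromList [3, 4 :: Float])
  result <- TF.runWithFeeds [TF.feed p feedVal] doubled
  liftIO $ print (result :: V.Vector Float)  -- expected [6.0,8.0]
```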

placeholder'

Arguments

:: TensorType dtype 
=> OpParams 
-> Tensor Build dtype

output: A placeholder tensor that must be replaced using the feed mechanism.

placeholderV2

Arguments

:: TensorType dtype 
=> Shape

shape: The shape of the tensor. The shape can be any partially-specified shape. To be unconstrained, pass in a shape with unknown rank.

-> Tensor Build dtype

output: A placeholder tensor that must be replaced using the feed mechanism.

A placeholder op for a value that will be fed into the computation.

N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime.

placeholderV2'

Arguments

:: TensorType dtype 
=> OpParams 
-> Shape

shape: The shape of the tensor. The shape can be any partially-specified shape. To be unconstrained, pass in a shape with unknown rank.

-> Tensor Build dtype

output: A placeholder tensor that must be replaced using the feed mechanism.

placeholderWithDefault

Arguments

:: TensorType dtype 
=> Shape

shape: The (possibly partial) shape of the tensor.

-> Tensor v'1 dtype

input: The default value to produce when output is not fed.

-> Tensor Build dtype

output: A placeholder tensor that defaults to input if it is not fed.

A placeholder op that passes through input when its output is not fed.
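
A quick sketch of the pass-through behavior (tensorflow-haskell; aliases ours, argument order taken from the signature above): run the op without feeding it and the default value comes back.

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  -- No feed supplied, so the default value flows through unchanged.
  v <- TF.runSession $ TF.run $
    TFC.placeholderWithDefault (TF.Shape [3]) (TFO.vector [1, 2, 3 :: Float])
  print (v :: V.Vector Float)  -- fromList [1.0,2.0,3.0]
```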

placeholderWithDefault'

Arguments

:: TensorType dtype 
=> OpParams 
-> Shape

shape: The (possibly partial) shape of the tensor.

-> Tensor v'1 dtype

input: The default value to produce when output is not fed.

-> Tensor Build dtype

output: A placeholder tensor that defaults to input if it is not fed.

polygamma

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

a

-> Tensor v'2 t

x

-> Tensor Build t

z

Compute the polygamma function \(\psi^{(n)}(x)\).

The polygamma function is defined as:

```
\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)
```
where \(\psi(x)\) is the digamma function.
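
As a quick numeric check, \(\psi^{(1)}(1)\) is the trigamma value \(\pi^2/6 \approx 1.6449\). A tensorflow-haskell sketch (aliases ours):

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  z <- TF.runSession $ TF.run $
    TFC.polygamma (TFO.scalar 1) (TFO.scalar (1 :: Float))
  print (z :: V.Vector Float)  -- approximately fromList [1.6449]
```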

polygamma'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

a

-> Tensor v'2 t

x

-> Tensor Build t

z

pow

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Computes the power of one value to another.

Given a tensor x and a tensor y, this operation computes \(x^y\) for corresponding elements in x and y. For example:

```
# tensor x is [[2, 2], [3, 3]]
# tensor y is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
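
The same example in tensorflow-haskell (a sketch; `constant` comes from TensorFlow.Ops, aliases ours). Results come back row-major:

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  z <- TF.runSession $ TF.run $
    TFC.pow (TFO.constant (TF.Shape [2, 2]) [2, 2, 3, 3 :: Float])
            (TFO.constant (TF.Shape [2, 2]) [8, 16, 2, 3 :: Float])
  print (z :: V.Vector Float)  -- fromList [256.0,65536.0,9.0,27.0]
```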

pow'

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

preventGradient

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor Build t

output

An identity op that triggers an error if a gradient is requested.

When executed in a graph, this op outputs its input tensor as-is.

When building ops to compute gradients, the TensorFlow gradient system will return an error when trying to look up the gradient of this op, because no gradient must ever be registered for this function. This op exists to prevent subtle bugs from silently returning unimplemented gradients in some corner cases.

preventGradient'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

print

Arguments

:: (MonadBuild m', TensorType t, TensorTypes u) 
=> Tensor v'1 t

input: The tensor passed to output

-> TensorList v'2 u

data: A list of tensors to print out when op is evaluated.

-> m' (Tensor Value t)

output: = The unmodified input tensor

Prints a list of tensors.

Passes input through to output and prints `data` when evaluating.

print'

Arguments

:: (MonadBuild m', TensorType t, TensorTypes u) 
=> OpParams 
-> Tensor v'1 t

input: The tensor passed to output

-> TensorList v'2 u

data: A list of tensors to print out when op is evaluated.

-> m' (Tensor Value t)

output: = The unmodified input tensor

priorityQueue

Arguments

:: MonadBuild m' 
=> m' (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that produces elements sorted by the first component value.

Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra entry in their input (resp. output) lists.

priorityQueue'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' (Tensor Ref ByteString)

handle: The handle to the queue.

priorityQueueV2

Arguments

:: MonadBuild m' 
=> m' ResourceHandle

handle: The handle to the queue.

A queue that produces elements sorted by the first component value.

Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra entry in their input (resp. output) lists.

priorityQueueV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' ResourceHandle

handle: The handle to the queue.

prod

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

Computes the product of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
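
For instance, reducing `[[1, 2], [3, 4]]` over dimension 0 yields the column products `[3, 8]`. A tensorflow-haskell sketch (aliases ours):

```
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  z <- TF.runSession $ TF.run $
    TFC.prod (TFO.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float])
             (TFO.vector [0 :: Int32])   -- reduce along dimension 0
  print (z :: V.Vector Float)  -- fromList [3.0,8.0]
```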

prod'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

qr

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> Tensor v'1 t

input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Let P be the minimum of M and N.

-> (Tensor Build t, Tensor Build t)

(q, r)

  • q: Orthonormal basis for range of a. If full_matrices is False then shape is `[..., M, P]`; if full_matrices is True then shape is `[..., M, M]`.
  • r: Triangular factor. If full_matrices is False then shape is `[..., P, N]`. If full_matrices is True then shape is `[..., M, N]`.

Computes the QR decompositions of one or more matrices.

Computes the QR decomposition of each inner matrix in tensor such that `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`

```prettyprint
# a is a tensor.
# q is a tensor of orthonormal matrices.
# r is a tensor of upper triangular matrices.
q, r = qr(a)
q_full, r_full = qr(a, full_matrices=True)
```
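
The Haskell binding returns the pair directly, so both factors can be fetched in one run (a sketch; aliases ours, relying on the tuple Fetchable instance):

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  (q, r) <- TF.runSession $ TF.run $
    TFC.qr (TFO.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float])
  print (q :: V.Vector Float)  -- 2x2 orthonormal factor, row-major
  print (r :: V.Vector Float)  -- 2x2 upper-triangular factor, row-major
```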

qr'

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Let P be the minimum of M and N.

-> (Tensor Build t, Tensor Build t)

(q, r)

  • q: Orthonormal basis for range of a. If full_matrices is False then shape is `[..., M, P]`; if full_matrices is True then shape is `[..., M, M]`.
  • r: Triangular factor. If full_matrices is False then shape is `[..., P, N]`. If full_matrices is True then shape is `[..., M, N]`.

quantizeAndDequantize

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input: Tensor to quantize and then dequantize.

-> Tensor Build t

output

Quantizes then dequantizes a tensor.

This op simulates the precision loss from the quantized forward pass by:

  1. Quantizing the tensor to fixed point numbers, which should match the target quantization method when it is used in inference.
  2. Dequantizing it back to floating point numbers for the following ops, most likely matmul.

There are different ways to quantize. This version does not use the full range of the output type, choosing to elide the lowest possible value for symmetry (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to 0.

To perform this op, we first find the range of values in our tensor. The range we use is always centered on 0, so we find m such that

  1. m = max(abs(input_min), abs(input_max)) if range_given is true,
  2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.

Our input tensor range is then [-m, m].

Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`. If signed_input is true, this is

`[min_fixed, max_fixed] = [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]`.

Otherwise, if signed_input is false, the fixed-point range is

`[min_fixed, max_fixed] = [0, (1 << num_bits) - 1]`.

From this we compute our scaling factor, s:

s = (max_fixed - min_fixed) / (2 * m).

Now we can quantize and dequantize the elements of our tensor. An element e is transformed into e':

e' = (e * s).round_to_nearest() / s.

Note that we have a different number of buckets in the signed vs. unsigned cases. For example, if num_bits == 8, we get 254 buckets in the signed case vs. 255 in the unsigned case.

For example, suppose num_bits = 8 and m = 1. Then

`[min_fixed, max_fixed] = [-127, 127]`, and s = (127 + 127) / 2 = 127.

Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
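
The e → e' mapping is easy to check in plain Haskell. The sketch below is our own and assumes round-half-up as the tie-breaking rule, which reproduces the quantized values quoted above; the op's exact `round_to_nearest` tie-breaking is not documented here.

```
-- e' = round(e * s) / s with s = 127, rounding halves up (our assumption).
quantDequant :: Double -> Double
quantDequant e = fromIntegral (floor (e * s + 0.5) :: Integer) / s
  where s = 127

-- map quantDequant [-1, -0.5, 0, 0.3]
--   ==> [-1.0, -0.49606..., 0.0, 0.29921...]   (i.e. {-127, -63, 0, 38} / 127)
```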

quantizeAndDequantize'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Tensor to quantize and then dequantize.

-> Tensor Build t

output

quantizeDownAndShrinkRange

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v'1 tinput

input

-> Tensor v'2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v'3 Float

input_max: The float value that the maximum quantized input value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output
  • output_min: The float value that the minimum quantized output value represents.
  • output_max: The float value that the maximum quantized output value represents.

Convert the quantized input tensor into a lower-precision output, using the

actual distribution of the values to maximize the usage of the lower bit depth and adjusting the output min and max ranges accordingly.

`input_min` and `input_max` are scalar floats that specify the range for the float interpretation of the input data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.

This operator tries to squeeze as much precision as possible into an output with a lower bit depth by calculating the actual min and max values found in the data. For example, maybe that quint16 input has no values lower than 16,384 and none higher than 49,152. That means only half the range is actually needed, all the float interpretations are between -0.5f and 0.5f, so if we want to compress the data into a quint8 output, we can use that range rather than the theoretical -1.0f to 1.0f that is suggested by the input min and max.

In practice, this is most useful for taking output from operations like QuantizedMatMul that can produce higher bit-depth outputs than their inputs and may have large potential output ranges, but in practice have a distribution of input values that only uses a small fraction of the possible range. By feeding that output into this operator, we can reduce it from 32 bits down to 8 with minimal loss of accuracy.

quantizeDownAndShrinkRange'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Tensor v'1 tinput

input

-> Tensor v'2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v'3 Float

input_max: The float value that the maximum quantized input value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output
  • output_min: The float value that the minimum quantized output value represents.
  • output_max: The float value that the maximum quantized output value represents.

quantizeV2

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> Tensor v'1 Float

input

-> Tensor v'2 Float

min_range: The minimum scalar value possibly produced for the input.

-> Tensor v'3 Float

max_range: The maximum scalar value possibly produced for the input.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output: The quantized data produced from the float input.
  • output_min: The actual minimum scalar value used for the output.
  • output_max: The actual maximum scalar value used for the output.

Quantize the input tensor of type float to output tensor of type T.

`min_range` and `max_range` are scalar floats that specify the range for the input data. The mode attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.

In MIN_COMBINED mode, each value of the tensor will undergo the following:

```
out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
if T == qint8, out[i] -= (range(T) + 1) / 2.0
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

MIN_COMBINED Mode Example:

Assume the input is type float and has a possible range of [0.0, 6.0] and the output type is quint8 ([0, 255]). The min_range and max_range values should be specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each value of the input by 255/6 and cast to quint8.

If the output type was qint8 ([-128, 127]), the operation will additionally subtract 128 from each value prior to casting, so that the range of values aligns with the range of qint8.

If the mode is MIN_FIRST, then this approach is used:

```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = number_of_steps / range
quantized = round(input * range_scale) - round(range_min * range_scale) +
  numeric_limits<T>::min()
quantized = max(quantized, numeric_limits<T>::min())
quantized = min(quantized, numeric_limits<T>::max())
```

The biggest difference between this and MIN_COMBINED is that the minimum range is rounded first, before it's subtracted from the rounded value. With MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing and dequantizing will introduce a larger and larger error.

One thing to watch out for is that the operator may choose to adjust the requested minimum and maximum values slightly during the quantization process, so you should always use the output ports as the range for further calculations. For example, if the requested minimum and maximum values are close to equal, they will be separated by a small epsilon value to prevent ill-formed quantized buffers from being created. Otherwise, you can end up with buffers where all the quantized values map to the same float value, which causes problems for operations that have to perform further calculations on them.

quantizeV2'

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 Float

input

-> Tensor v'2 Float

min_range: The minimum scalar value possibly produced for the input.

-> Tensor v'3 Float

max_range: The maximum scalar value possibly produced for the input.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output: The quantized data produced from the float input.
  • output_min: The actual minimum scalar value used for the output.
  • output_max: The actual maximum scalar value used for the output.

quantizedAvgPool

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> Tensor v'1 t

input: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'3 Float

max_input: The float value that the highest quantized input value represents.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

Produces the average pool of the input tensor for quantized types.

quantizedAvgPool'

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 t

input: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'3 Float

max_input: The float value that the highest quantized input value represents.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

quantizedBatchNormWithGlobalNormalization

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Bool

scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v'1 tinput

t: A 4D input Tensor.

-> Tensor v'2 Float

t_min: The value represented by the lowest quantized input.

-> Tensor v'3 Float

t_max: The value represented by the highest quantized input.

-> Tensor v'4 tinput

m: A 1D mean Tensor with size matching the last dimension of t. + This is the first output from tf.nn.moments, + or a saved moving average thereof.

-> Tensor v'5 Float

m_min: The value represented by the lowest quantized mean.

-> Tensor v'6 Float

m_max: The value represented by the highest quantized mean.

-> Tensor v'7 tinput

v: A 1D variance Tensor with size matching the last dimension of t. + This is the second output from tf.nn.moments, + or a saved moving average thereof.

-> Tensor v'8 Float

v_min: The value represented by the lowest quantized variance.

-> Tensor v'9 Float

v_max: The value represented by the highest quantized variance.

-> Tensor v'10 tinput

beta: A 1D beta Tensor with size matching the last dimension of t. + An offset to be added to the normalized tensor.

-> Tensor v'11 Float

beta_min: The value represented by the lowest quantized offset.

-> Tensor v'12 Float

beta_max: The value represented by the highest quantized offset.

-> Tensor v'13 tinput

gamma: A 1D gamma Tensor with size matching the last dimension of t. + If "scale_after_normalization" is true, this tensor will be multiplied + with the normalized tensor.

-> Tensor v'14 Float

gamma_min: The value represented by the lowest quantized gamma.

-> Tensor v'15 Float

gamma_max: The value represented by the highest quantized gamma.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(result, result_min, result_max)

  • result
  • result_min
  • result_max

Quantized Batch normalization.

This op is deprecated and will be removed in the future. Prefer + `tf.nn.batch_normalization`.

quantizedBatchNormWithGlobalNormalization'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Bool

scale_after_normalization: A bool indicating whether the resulted tensor + needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v'1 tinput

t: A 4D input Tensor.

-> Tensor v'2 Float

t_min: The value represented by the lowest quantized input.

-> Tensor v'3 Float

t_max: The value represented by the highest quantized input.

-> Tensor v'4 tinput

m: A 1D mean Tensor with size matching the last dimension of t. + This is the first output from tf.nn.moments, + or a saved moving average thereof.

-> Tensor v'5 Float

m_min: The value represented by the lowest quantized mean.

-> Tensor v'6 Float

m_max: The value represented by the highest quantized mean.

-> Tensor v'7 tinput

v: A 1D variance Tensor with size matching the last dimension of t. + This is the second output from tf.nn.moments, + or a saved moving average thereof.

-> Tensor v'8 Float

v_min: The value represented by the lowest quantized variance.

-> Tensor v'9 Float

v_max: The value represented by the highest quantized variance.

-> Tensor v'10 tinput

beta: A 1D beta Tensor with size matching the last dimension of t. + An offset to be added to the normalized tensor.

-> Tensor v'11 Float

beta_min: The value represented by the lowest quantized offset.

-> Tensor v'12 Float

beta_max: The value represented by the highest quantized offset.

-> Tensor v'13 tinput

gamma: A 1D gamma Tensor with size matching the last dimension of t. + If "scale_after_normalization" is true, this tensor will be multiplied + with the normalized tensor.

-> Tensor v'14 Float

gamma_min: The value represented by the lowest quantized gamma.

-> Tensor v'15 Float

gamma_max: The value represented by the highest quantized gamma.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(result, result_min, result_max)

  • result
  • result_min
  • result_max

quantizedBiasAdd

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v'1 t1

input

-> Tensor v'2 t2

bias: A 1D bias Tensor with size matching the last dimension of input.

-> Tensor v'3 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'4 Float

max_input: The float value that the highest quantized input value represents.

-> Tensor v'5 Float

min_bias: The float value that the lowest quantized bias value represents.

-> Tensor v'6 Float

max_bias: The float value that the highest quantized bias value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, min_out, max_out)

  • output
  • min_out: The float value that the lowest quantized output value represents.
  • max_out: The float value that the highest quantized output value represents.

Adds Tensor bias to Tensor input for Quantized types.

Broadcasts the values of bias on dimensions 0..N-2 of input.

quantizedBiasAdd'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Tensor v'1 t1

input

-> Tensor v'2 t2

bias: A 1D bias Tensor with size matching the last dimension of input.

-> Tensor v'3 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'4 Float

max_input: The float value that the highest quantized input value represents.

-> Tensor v'5 Float

min_bias: The float value that the lowest quantized bias value represents.

-> Tensor v'6 Float

max_bias: The float value that the highest quantized bias value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, min_out, max_out)

  • output
  • min_out: The float value that the lowest quantized output value represents.
  • max_out: The float value that the highest quantized output value represents.

quantizedConcat

Arguments

:: TensorType t 
=> Tensor v'1 Int32

concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

-> [Tensor v'2 t]

values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

-> [Tensor v'3 Float]

input_mins: The minimum scalar values for each of the input tensors.

-> [Tensor v'4 Float]

input_maxes: The maximum scalar values for each of the input tensors.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output: A Tensor with the concatenation of values stacked along the + concat_dim dimension. This tensor's shape matches that of values except + in concat_dim where it has the sum of the sizes.
  • output_min: The float value that the minimum quantized output value represents.
  • output_max: The float value that the maximum quantized output value represents.

Concatenates quantized tensors along one dimension.

quantizedConcat'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int32

concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

-> [Tensor v'2 t]

values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

-> [Tensor v'3 Float]

input_mins: The minimum scalar values for each of the input tensors.

-> [Tensor v'4 Float]

input_maxes: The maximum scalar values for each of the input tensors.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output: A Tensor with the concatenation of values stacked along the + concat_dim dimension. This tensor's shape matches that of values except + in concat_dim where it has the sum of the sizes.
  • output_min: The float value that the minimum quantized output value represents.
  • output_max: The float value that the maximum quantized output value represents.

quantizedConv2D

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v'1 tinput

input

-> Tensor v'2 tfilter

filter: filter's input_depth dimension must match input's depth dimensions.

-> Tensor v'3 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'4 Float

max_input: The float value that the highest quantized input value represents.

-> Tensor v'5 Float

min_filter: The float value that the lowest quantized filter value represents.

-> Tensor v'6 Float

max_filter: The float value that the highest quantized filter value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

Computes a 2D convolution given quantized 4D input and filter tensors.

The inputs are quantized tensors where the lowest value represents the real number of the associated minimum, and the highest represents the maximum. This means that you can only interpret the quantized output in the same way, by taking the returned minimum and maximum values into account.

quantizedConv2D'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` tfilter, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Tensor v'1 tinput

input

-> Tensor v'2 tfilter

filter: filter's input_depth dimension must match input's depth dimensions.

-> Tensor v'3 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'4 Float

max_input: The float value that the highest quantized input value represents.

-> Tensor v'5 Float

min_filter: The float value that the lowest quantized filter value represents.

-> Tensor v'6 Float

max_filter: The float value that the highest quantized filter value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

quantizedInstanceNorm

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> Tensor v'1 t

x: A 4D input Tensor.

-> Tensor v'2 Float

x_min: The value represented by the lowest quantized input.

-> Tensor v'3 Float

x_max: The value represented by the highest quantized input.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(y, y_min, y_max)

  • y: A 4D Tensor.
  • y_min: The value represented by the lowest quantized output.
  • y_max: The value represented by the highest quantized output.

Quantized Instance normalization.

quantizedInstanceNorm'

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 t

x: A 4D input Tensor.

-> Tensor v'2 Float

x_min: The value represented by the lowest quantized input.

-> Tensor v'3 Float

x_max: The value represented by the highest quantized input.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(y, y_min, y_max)

  • y: A 4D Tensor.
  • y_min: The value represented by the lowest quantized output.
  • y_max: The value represented by the highest quantized output.

quantizedMatMul

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) 
=> Tensor v'1 t1

a: Must be a two-dimensional tensor.

-> Tensor v'2 t2

b: Must be a two-dimensional tensor.

-> Tensor v'3 Float

min_a: The float value that the lowest quantized a value represents.

-> Tensor v'4 Float

max_a: The float value that the highest quantized a value represents.

-> Tensor v'5 Float

min_b: The float value that the lowest quantized b value represents.

-> Tensor v'6 Float

max_b: The float value that the highest quantized b value represents.

-> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

(out, min_out, max_out)

  • out
  • min_out: The float value that the lowest quantized output value represents.
  • max_out: The float value that the highest quantized output value represents.

Perform a quantized matrix multiplication of a by the matrix b.

The inputs must be two-dimensional matrices and the inner dimension of a (after being transposed if transpose_a is non-zero) must match the outer dimension of b (after being transposed if transpose_b is non-zero).

quantizedMatMul'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` t1, OneOf `[Int16, Int32, Word16, Word8]` t2, OneOf `[Int16, Int32, Word16, Word8]` toutput) 
=> OpParams 
-> Tensor v'1 t1

a: Must be a two-dimensional tensor.

-> Tensor v'2 t2

b: Must be a two-dimensional tensor.

-> Tensor v'3 Float

min_a: The float value that the lowest quantized a value represents.

-> Tensor v'4 Float

max_a: The float value that the highest quantized a value represents.

-> Tensor v'5 Float

min_b: The float value that the lowest quantized b value represents.

-> Tensor v'6 Float

max_b: The float value that the highest quantized b value represents.

-> (Tensor Build toutput, Tensor Build Float, Tensor Build Float)

(out, min_out, max_out)

  • out
  • min_out: The float value that the lowest quantized output value represents.
  • max_out: The float value that the highest quantized output value represents.

quantizedMaxPool

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> Tensor v'1 t

input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.

-> Tensor v'2 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'3 Float

max_input: The float value that the highest quantized input value represents.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

Produces the max pool of the input tensor for quantized types.

quantizedMaxPool'

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 t

input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.

-> Tensor v'2 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v'3 Float

max_input: The float value that the highest quantized input value represents.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

quantizedRelu

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v'1 tinput

features

-> Tensor v'2 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v'3 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

Computes Quantized Rectified Linear: `max(features, 0)`

quantizedRelu'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Tensor v'1 tinput

features

-> Tensor v'2 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v'3 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

quantizedRelu6

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v'1 tinput

features

-> Tensor v'2 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v'3 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`

quantizedRelu6'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Tensor v'1 tinput

features

-> Tensor v'2 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v'3 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

quantizedReluX

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v'1 tinput

features

-> Tensor v'2 Float

max_value

-> Tensor v'3 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v'4 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`

quantizedReluX'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Tensor v'1 tinput

features

-> Tensor v'2 Float

max_value

-> Tensor v'3 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v'4 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

quantizedReshape

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tshape) 
=> Tensor v'1 t

tensor

-> Tensor v'2 tshape

shape: Defines the shape of the output tensor.

-> Tensor v'3 Float

input_min: The minimum value of the input.

-> Tensor v'4 Float

input_max: The maximum value of the input.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output
  • output_min: This value is copied from input_min.
  • output_max: This value is copied from input_max.

Reshapes a quantized tensor as per the Reshape op.

quantizedReshape'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tshape) 
=> OpParams 
-> Tensor v'1 t

tensor

-> Tensor v'2 tshape

shape: Defines the shape of the output tensor.

-> Tensor v'3 Float

input_min: The minimum value of the input.

-> Tensor v'4 Float

input_max: The maximum value of the input.

-> (Tensor Build t, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output
  • output_min: This value is copied from input_min.
  • output_max: This value is copied from input_max.

queueClose

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to a queue.

-> m' ControlNode 

Closes the given queue.

This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.

queueClose'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a queue.

-> m' ControlNode 

queueCloseV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

handle: The handle to a queue.

-> m' ControlNode 

Closes the given queue.

This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.

queueCloseV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

handle: The handle to a queue.

-> m' ControlNode 

queueDequeue

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> Tensor Ref ByteString

handle: The handle to a queue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

Dequeues a tuple of one or more tensors from the given queue.

This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

N.B. If the queue is empty, this operation will block until an element has been dequeued (or timeout_ms elapses, if specified).

queueDequeue'

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a queue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

queueDequeueMany

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> Tensor Ref ByteString

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

Dequeues n tuples of one or more tensors from the given queue.

If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.

This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

N.B. If the queue is empty, this operation will block until n elements have been dequeued (or timeout_ms elapses, if specified).

queueDequeueMany'

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

queueDequeueManyV2

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> ResourceHandle

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

Dequeues n tuples of one or more tensors from the given queue.

If the queue is closed and there are fewer than n elements, then an OutOfRange error is returned.

This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

N.B. If the queue is empty, this operation will block until n elements have been dequeued (or timeout_ms elapses, if specified).

queueDequeueManyV2'

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> OpParams 
-> ResourceHandle

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

queueDequeueUpTo

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> Tensor Ref ByteString

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

Dequeues n tuples of one or more tensors from the given queue.

This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.

If the queue is closed and there are more than 0 but fewer than n elements remaining, then instead of returning an OutOfRange error like QueueDequeueMany, fewer than n elements are returned immediately. If the queue is closed and there are 0 elements left in the queue, then an OutOfRange error is returned just like in QueueDequeueMany. Otherwise the behavior is identical to QueueDequeueMany:

This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

queueDequeueUpTo'

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

queueDequeueUpToV2

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> ResourceHandle

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

Dequeues n tuples of one or more tensors from the given queue.

This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.

If the queue is closed and there are more than 0 but fewer than n elements remaining, then instead of returning an OutOfRange error like QueueDequeueMany, fewer than n elements are returned immediately. If the queue is closed and there are 0 elements left in the queue, then an OutOfRange error is returned just like in QueueDequeueMany. Otherwise the behavior is identical to QueueDequeueMany:

This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.

This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

queueDequeueUpToV2'

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> OpParams 
-> ResourceHandle

handle: The handle to a queue.

-> Tensor v'2 Int32

n: The number of tuples to dequeue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

queueDequeueV2

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> ResourceHandle

handle: The handle to a queue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

Dequeues a tuple of one or more tensors from the given queue.

This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.

N.B. If the queue is empty, this operation will block until an element has been dequeued (or timeout_ms elapses, if specified).

queueDequeueV2'

Arguments

:: (MonadBuild m', TensorTypes component_types) 
=> OpParams 
-> ResourceHandle

handle: The handle to a queue.

-> m' (TensorList Value component_types)

components: One or more tensors that were dequeued as a tuple.

queueEnqueue

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> Tensor Ref ByteString

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should be taken.

-> m' ControlNode 

Enqueues a tuple of one or more tensors in the given queue.

The components input has k elements, which correspond to the components of tuples stored in the given queue.

N.B. If the queue is full, this operation will block until the given element has been enqueued (or timeout_ms elapses, if specified).

queueEnqueue'

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should be taken.

-> m' ControlNode 

queueEnqueueMany

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> Tensor Ref ByteString

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should + be taken.

-> m' ControlNode 

Enqueues zero or more tuples of one or more tensors in the given queue.

This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.

The components input has k elements, which correspond to the components of tuples stored in the given queue.

N.B. If the queue is full, this operation will block until the given elements have been enqueued (or timeout_ms elapses, if specified).

queueEnqueueMany'

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should + be taken.

-> m' ControlNode 

queueEnqueueManyV2

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> ResourceHandle

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should + be taken.

-> m' ControlNode 

Enqueues zero or more tuples of one or more tensors in the given queue.

This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.

The components input has k elements, which correspond to the components of tuples stored in the given queue.

N.B. If the queue is full, this operation will block until the given elements have been enqueued (or timeout_ms elapses, if specified).

queueEnqueueManyV2'

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> OpParams 
-> ResourceHandle

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should + be taken.

-> m' ControlNode 

queueEnqueueV2

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> ResourceHandle

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should be taken.

-> m' ControlNode 

Enqueues a tuple of one or more tensors in the given queue.

The components input has k elements, which correspond to the components of tuples stored in the given queue.

N.B. If the queue is full, this operation will block until the given element has been enqueued (or timeout_ms elapses, if specified).

queueEnqueueV2'

Arguments

:: (MonadBuild m', TensorTypes tcomponents) 
=> OpParams 
-> ResourceHandle

handle: The handle to a queue.

-> TensorList v'2 tcomponents

components: One or more tensors from which the enqueued tensors should be taken.

-> m' ControlNode 

queueSize

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to a queue.

-> m' (Tensor Value Int32)

size: The number of elements in the given queue.

Computes the number of elements in the given queue.

queueSize'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a queue.

-> m' (Tensor Value Int32)

size: The number of elements in the given queue.

queueSizeV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

handle: The handle to a queue.

-> m' (Tensor Value Int32)

size: The number of elements in the given queue.

Computes the number of elements in the given queue.

queueSizeV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

handle: The handle to a queue.

-> m' (Tensor Value Int32)

size: The number of elements in the given queue.

rGBToHSV

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

-> Tensor Build t

output: images converted to HSV.

Converts one or more images from RGB to HSV.

Outputs a tensor of the same shape as the images tensor, containing the HSV value of the pixels. The output is only well defined if the values in images are in `[0,1]`.

`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
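
For example, a single pure-red pixel maps to hue 0, saturation 1, value 1. A tensorflow-haskell sketch (aliases ours):

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  hsv <- TF.runSession $ TF.run $
    TFC.rGBToHSV (TFO.constant (TF.Shape [1, 3]) [1, 0, 0 :: Float])
  print (hsv :: V.Vector Float)  -- fromList [0.0,1.0,1.0]
```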

rGBToHSV'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

-> Tensor Build t

output: images converted to HSV.

randomCrop

Arguments

:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
=> Tensor v'1 t

image: 3-D of shape `[height, width, channels]`.

-> Tensor v'2 Int64

size: 1-D of length 2 containing: crop_height, crop_width.

-> m' (Tensor Value t)

output: 3-D of shape `[crop_height, crop_width, channels]`.

Randomly crop image.

size is a 1-D int64 tensor with 2 elements representing the crop height and width. The values must be non-negative.

This Op picks a random location in image and crops a height by width rectangle from that location. The random location is picked so the cropped area will fit inside the original image.

randomCrop'

Arguments

:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor v'1 t

image: 3-D of shape `[height, width, channels]`.

-> Tensor v'2 Int64

size: 1-D of length 2 containing: crop_height, crop_width.

-> m' (Tensor Value t)

output: 3-D of shape `[crop_height, crop_width, channels]`.

randomGamma

Arguments

:: (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) 
=> Tensor v'1 s

shape: 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in alpha.

-> Tensor v'2 t

alpha: A tensor in which each scalar is a "shape" parameter describing the + associated gamma distribution.

-> m' (Tensor Value t)

output: A tensor with shape `shape + shape(alpha)`. Each slice + `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for + `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.

Outputs random values from the Gamma distribution(s) described by alpha.

This op uses the algorithm by Marsaglia et al. to acquire samples via transformation-rejection from pairs of uniform and normal random variables. See http://dl.acm.org/citation.cfm?id=358414

randomGamma'

Arguments

:: (MonadBuild m', OneOf `[Int32, Int64]` s, OneOf `[Word16, Double, Float]` t) 
=> OpParams 
-> Tensor v'1 s

shape: 1-D integer tensor. Shape of independent samples to draw from each + distribution described by the shape parameters given in alpha.

-> Tensor v'2 t

alpha: A tensor in which each scalar is a "shape" parameter describing the + associated gamma distribution.

-> m' (Tensor Value t)

output: A tensor with shape `shape + shape(alpha)`. Each slice + `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for + `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.

randomShuffle

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor v'1 t

value: The tensor to be shuffled.

-> m' (Tensor Value t)

output: A tensor of same shape and type as value, shuffled along its first + dimension.

Randomly shuffles a tensor along its first dimension.

The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is:

```prettyprint
[[1, 2],       [[5, 6],
 [3, 4],  ==>   [1, 2],
 [5, 6]]        [3, 4]]
```
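
Since the op is stateful, the Haskell binding runs in a MonadBuild context such as Session (a sketch; aliases ours):

```
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  v <- TF.runSession $ do
    shuffled <- TFC.randomShuffle
      (TFO.constant (TF.Shape [3, 2]) [1, 2, 3, 4, 5, 6 :: Float])
    TF.run shuffled
  -- The rows [1,2], [3,4], [5,6] come back in some random order.
  print (v :: V.Vector Float)
```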

randomShuffle'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor v'1 t

value: The tensor to be shuffled.

-> m' (Tensor Value t)

output: A tensor of same shape and type as value, shuffled along its first + dimension.

randomShuffleQueue

Arguments

:: MonadBuild m' 
=> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that randomizes the order of elements.

randomShuffleQueue'

Arguments

:: MonadBuild m' 
=> OpParams 
-> [DataType]

component_types: The type of each component in a value.

-> m' (Tensor Ref ByteString)

handle: The handle to the queue.

randomShuffleQueueV2

Arguments

:: MonadBuild m' 
=> [DataType]

component_types: The type of each component in a value.

-> m' ResourceHandle

handle: The handle to the queue.

A queue that randomizes the order of elements.

randomShuffleQueueV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> [DataType]

component_types: The type of each component in a value.

-> m' ResourceHandle

handle: The handle to the queue.

randomStandardNormal

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> Tensor v'1 t

shape: The shape of the output tensor.

-> m' (Tensor Value dtype)

output: A tensor of the specified shape filled with random normal values.

Outputs random values from a normal distribution.

The generated values will have mean 0 and standard deviation 1.
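
A minimal draw (tensorflow-haskell sketch; aliases ours):

```
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TFC
import qualified TensorFlow.Ops as TFO

main :: IO ()
main = do
  v <- TF.runSession $ do
    r <- TFC.randomStandardNormal (TFO.vector [3 :: Int32])
    TF.run (r :: TF.Tensor TF.Value Float)
  print (v :: V.Vector Float)  -- three draws from N(0, 1)
```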

randomStandardNormal'

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> OpParams 
-> Tensor v'1 t

shape: The shape of the output tensor.

-> m' (Tensor Value dtype)

output: A tensor of the specified shape filled with random normal values.

randomUniform

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> Tensor v'1 t

shape: The shape of the output tensor.

-> m' (Tensor Value dtype)

output: A tensor of the specified shape filled with uniform random values.

Outputs random values from a uniform distribution.

The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.

randomUniform'

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> OpParams 
-> Tensor v'1 t

shape: The shape of the output tensor.

-> m' (Tensor Value dtype)

output: A tensor of the specified shape filled with uniform random values.

randomUniformInt

Arguments

:: (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) 
=> Tensor v'1 t

shape: The shape of the output tensor.

-> Tensor v'2 tout

minval: 0-D. Inclusive lower bound on the generated integers.

-> Tensor v'3 tout

maxval: 0-D. Exclusive upper bound on the generated integers.

-> m' (Tensor Value tout)

output: A tensor of the specified shape filled with uniform random integers.

Outputs random integers from a uniform distribution.

The generated values are uniform integers in the range `[minval, maxval)`. The lower bound minval is included in the range, while the upper bound maxval is excluded.

The random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2^32` or `2^64`).
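A hypothetical Haskell sketch of where that bias comes from: reducing a raw 32-bit word into the range by modulo over-represents the first `2^32 mod (maxval - minval)` values whenever the range does not divide `2^32`:

```haskell
import Data.Word (Word32)

-- Illustrative only: the first (2^32 `mod` range) outputs each have one
-- extra preimage among the 2^32 possible words, hence the slight bias.
toRange :: Int -> Int -> Word32 -> Int
toRange minval maxval w = minval + fromIntegral w `mod` (maxval - minval)
```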

randomUniformInt'

Arguments

:: (MonadBuild m', OneOf `[Int32, Int64]` tout, OneOf `[Int32, Int64]` t) 
=> OpParams 
-> Tensor v'1 t

shape: The shape of the output tensor.

-> Tensor v'2 tout

minval: 0-D. Inclusive lower bound on the generated integers.

-> Tensor v'3 tout

maxval: 0-D. Exclusive upper bound on the generated integers.

-> m' (Tensor Value tout)

output: A tensor of the specified shape filled with uniform random integers.

range

Arguments

:: OneOf `[Int32, Int64, Double, Float]` tidx 
=> Tensor v'1 tidx

start: 0-D (scalar). First entry in the sequence.

-> Tensor v'2 tidx

limit: 0-D (scalar). Upper limit of sequence, exclusive.

-> Tensor v'3 tidx

delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

-> Tensor Build tidx

output: 1-D.

Creates a sequence of numbers.

This operation creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.

For example:

```
# start is 3
# limit is 18
# delta is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
```
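The same semantics can be mirrored with a plain Haskell list (a sketch assuming delta > 0):

```haskell
-- Illustrative analogue: start, step by delta, stop before limit (exclusive).
rangeList :: (Num a, Ord a) => a -> a -> a -> [a]
rangeList start limit delta = takeWhile (< limit) (iterate (+ delta) start)

-- rangeList 3 18 3 == [3, 6, 9, 12, 15]
```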

range'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` tidx 
=> OpParams 
-> Tensor v'1 tidx

start: 0-D (scalar). First entry in the sequence.

-> Tensor v'2 tidx

limit: 0-D (scalar). Upper limit of sequence, exclusive.

-> Tensor v'3 tidx

delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

-> Tensor Build tidx

output: 1-D.

rank

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor Build Int32

output

Returns the rank of a tensor.

This operation returns an integer representing the rank of input.

For example:

```prettyprint
# t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor t is [2, 2, 3]
rank(t) ==> 3
```

*Note*: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as "order", "degree", or "ndims."

rank'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build Int32

output

readFile

Arguments

:: Tensor v'1 ByteString

filename

-> Tensor Build ByteString

contents

Reads and outputs the entire contents of the input filename.

readFile'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

filename

-> Tensor Build ByteString

contents

readVariableOp

Arguments

:: (MonadBuild m', TensorType dtype) 
=> ResourceHandle

resource: handle to the resource in which to store the variable.

-> m' (Tensor Value dtype)

value

Reads the value of a variable.

The tensor returned by this operation is immutable.

The value returned by this operation is guaranteed to be influenced by all the writes on which this operation depends directly or indirectly, and to not be influenced by any of the writes which depend directly or indirectly on this operation.

readVariableOp'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> ResourceHandle

resource: handle to the resource in which to store the variable.

-> m' (Tensor Value dtype)

value

readerNumRecordsProduced

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

records_produced

Returns the number of records this Reader has produced.

This is the same as the number of ReaderRead executions that have succeeded.

readerNumRecordsProduced'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

records_produced

readerNumRecordsProducedV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

records_produced

Returns the number of records this Reader has produced.

This is the same as the number of ReaderRead executions that have succeeded.

readerNumRecordsProducedV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

records_produced

readerNumWorkUnitsCompleted

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

units_completed

Returns the number of work units this Reader has finished processing.

readerNumWorkUnitsCompleted'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

units_completed

readerNumWorkUnitsCompletedV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

units_completed

Returns the number of work units this Reader has finished processing.

readerNumWorkUnitsCompletedV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

reader_handle: Handle to a Reader.

-> m' (Tensor Value Int64)

units_completed

readerRead

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor Ref ByteString

queue_handle: Handle to a Queue, with string work items.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(key, value)

  • key: A scalar.
  • value: A scalar.

Returns the next record (key, value pair) produced by a Reader.

Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).

readerRead'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor Ref ByteString

queue_handle: Handle to a Queue, with string work items.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(key, value)

  • key: A scalar.
  • value: A scalar.

readerReadUpTo

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor Ref ByteString

queue_handle: Handle to a Queue, with string work items.

-> Tensor v'3 Int64

num_records: number of records to read from Reader.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(keys, values)

  • keys: A 1-D tensor.
  • values: A 1-D tensor.

Returns up to num_records (key, value) pairs produced by a Reader.

Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return fewer than num_records even before the last batch.

readerReadUpTo'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor Ref ByteString

queue_handle: Handle to a Queue, with string work items.

-> Tensor v'3 Int64

num_records: number of records to read from Reader.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(keys, values)

  • keys: A 1-D tensor.
  • values: A 1-D tensor.

readerReadUpToV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

reader_handle: Handle to a Reader.

-> ResourceHandle

queue_handle: Handle to a Queue, with string work items.

-> Tensor v'3 Int64

num_records: number of records to read from Reader.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(keys, values)

  • keys: A 1-D tensor.
  • values: A 1-D tensor.

Returns up to num_records (key, value) pairs produced by a Reader.

Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return fewer than num_records even before the last batch.

readerReadUpToV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

reader_handle: Handle to a Reader.

-> ResourceHandle

queue_handle: Handle to a Queue, with string work items.

-> Tensor v'3 Int64

num_records: number of records to read from Reader.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(keys, values)

  • keys: A 1-D tensor.
  • values: A 1-D tensor.

readerReadV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

reader_handle: Handle to a Reader.

-> ResourceHandle

queue_handle: Handle to a Queue, with string work items.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(key, value)

  • key: A scalar.
  • value: A scalar.

Returns the next record (key, value pair) produced by a Reader.

Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).

readerReadV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

reader_handle: Handle to a Reader.

-> ResourceHandle

queue_handle: Handle to a Queue, with string work items.

-> m' (Tensor Value ByteString, Tensor Value ByteString)

(key, value)

  • key: A scalar.
  • value: A scalar.

readerReset

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' ControlNode 

Restore a Reader to its initial clean state.

readerReset'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' ControlNode 

readerResetV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

reader_handle: Handle to a Reader.

-> m' ControlNode 

Restore a Reader to its initial clean state.

readerResetV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

reader_handle: Handle to a Reader.

-> m' ControlNode 

readerRestoreState

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor v'2 ByteString

state: Result of a ReaderSerializeState of a Reader with type matching reader_handle.

-> m' ControlNode 

Restore a reader to a previously saved state.

Not all Readers support being restored, so this can produce an Unimplemented error.

readerRestoreState'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor v'2 ByteString

state: Result of a ReaderSerializeState of a Reader with type matching reader_handle.

-> m' ControlNode 

readerRestoreStateV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

reader_handle: Handle to a Reader.

-> Tensor v'2 ByteString

state: Result of a ReaderSerializeState of a Reader with type matching reader_handle.

-> m' ControlNode 

Restore a reader to a previously saved state.

Not all Readers support being restored, so this can produce an Unimplemented error.

readerRestoreStateV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

reader_handle: Handle to a Reader.

-> Tensor v'2 ByteString

state: Result of a ReaderSerializeState of a Reader with type matching reader_handle.

-> m' ControlNode 

readerSerializeState

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' (Tensor Value ByteString)

state

Produce a string tensor that encodes the state of a Reader.

Not all Readers support being serialized, so this can produce an Unimplemented error.

readerSerializeState'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> m' (Tensor Value ByteString)

state

readerSerializeStateV2

Arguments

:: MonadBuild m' 
=> ResourceHandle

reader_handle: Handle to a Reader.

-> m' (Tensor Value ByteString)

state

Produce a string tensor that encodes the state of a Reader.

Not all Readers support being serialized, so this can produce an Unimplemented error.

readerSerializeStateV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

reader_handle: Handle to a Reader.

-> m' (Tensor Value ByteString)

state

real

Arguments

:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
=> Tensor v'1 t

input

-> Tensor Build tout

output

Returns the real part of a complex number.

Given a tensor input of complex numbers, this operation returns a tensor of type float that is the real part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part returned by this operation and *b* is the imaginary part.

For example:

```
# tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
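In Haskell the same elementwise semantics can be sketched with Data.Complex (illustrative only):

```haskell
import Data.Complex (Complex(..), realPart)

-- realParts [(-2.25) :+ 4.75, 3.25 :+ 5.75] == [-2.25, 3.25]
realParts :: [Complex Double] -> [Double]
realParts = map realPart
```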

real'

Arguments

:: (OneOf `[Complex Double, Complex Float]` t, OneOf `[Double, Float]` tout) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build tout

output

realDiv

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns x / y element-wise for real types.

If x and y are reals, this will return the floating-point division.

*NOTE*: Div supports broadcasting. More about broadcasting here.

reciprocal

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes the reciprocal of x element-wise.

I.e., \(y = 1 / x\).

reciprocalGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Computes the gradient for the inverse of x wrt its input.

Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is the corresponding input gradient.

reciprocalGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

recordInput

Arguments

:: MonadBuild m' 
=> m' (Tensor Value ByteString)

records: A tensor of shape [batch_size].

Emits randomized records.

recordInput'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' (Tensor Value ByteString)

records: A tensor of shape [batch_size].

reduceJoin

Arguments

:: Tensor v'1 ByteString

inputs: The input to be joined. All reduced indices must have non-zero size.

-> Tensor v'2 Int32

reduction_indices: The dimensions to reduce over. Dimensions are reduced in the order specified. Omitting reduction_indices is equivalent to passing `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

-> Tensor Build ByteString

output: Has shape equal to that of the input with reduced dimensions removed or set to `1` depending on keep_dims.

Joins a string Tensor across the given dimensions.

Computes the string join across dimensions in the given string Tensor of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`.

For example:

```
# tensor a is [["a", "b"], ["c", "d"]]
tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
tf.reduce_join(a, [0, 1]) ==> ["acbd"]
tf.reduce_join(a, [1, 0]) ==> ["abcd"]
tf.reduce_join(a, []) ==> ["abcd"]
```
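For intuition, the dimension-0 and dimension-1 reductions can be sketched over a 2-D Haskell list (an illustrative analogue; the separator is made explicit here, while the op defaults to the empty string):

```haskell
import Data.List (intercalate, transpose)

-- joinDim0 "" [["a","b"],["c","d"]] == ["ac","bd"]   (join down columns)
-- joinDim1 "" [["a","b"],["c","d"]] == ["ab","cd"]   (join along rows)
joinDim0, joinDim1 :: String -> [[String]] -> [String]
joinDim0 sep = map (intercalate sep) . transpose
joinDim1 sep = map (intercalate sep)
```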

reduceJoin'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

inputs: The input to be joined. All reduced indices must have non-zero size.

-> Tensor v'2 Int32

reduction_indices: The dimensions to reduce over. Dimensions are reduced in the order specified. Omitting reduction_indices is equivalent to passing `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

-> Tensor Build ByteString

output: Has shape equal to that of the input with reduced dimensions removed or set to `1` depending on keep_dims.

refEnter

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref t

data: The tensor to be made available to the child frame.

-> m' (Tensor Ref t)

output: The same tensor as `data`.

Creates or finds a child frame, and makes `data` available to the child frame.

The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations are run in parallel in the child frame.

refEnter'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref t

data: The tensor to be made available to the child frame.

-> m' (Tensor Ref t)

output: The same tensor as `data`.

refExit

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref t

data: The tensor to be made available to the parent frame.

-> m' (Tensor Ref t)

output: The same tensor as `data`.

Exits the current frame to its parent frame.

Exit makes its input `data` available to the parent frame.

refExit'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref t

data: The tensor to be made available to the parent frame.

-> m' (Tensor Ref t)

output: The same tensor as `data`.

refIdentity

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref t

input

-> m' (Tensor Ref t)

output

Return the same ref tensor as the input ref tensor.

refIdentity'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref t

input

-> m' (Tensor Ref t)

output

refMerge

Arguments

:: (MonadBuild m', TensorType t) 
=> [Tensor Ref t]

inputs: The input tensors, exactly one of which will become available.

-> m' (Tensor Ref t, Tensor Value Int32)

(output, value_index)

  • output: Will be set to the available input tensor.
  • value_index: The index of the chosen input tensor in inputs.

Forwards the value of an available tensor from inputs to output.

Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.

Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.

refMerge'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> [Tensor Ref t]

inputs: The input tensors, exactly one of which will become available.

-> m' (Tensor Ref t, Tensor Value Int32)

(output, value_index)

  • output: Will be set to the available input tensor.
  • value_index: The index of the chosen input tensor in inputs.

refNextIteration

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref t

data: The tensor to be made available to the next iteration.

-> m' (Tensor Ref t)

output: The same tensor as `data`.

Makes its input available to the next iteration.

refNextIteration'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref t

data: The tensor to be made available to the next iteration.

-> m' (Tensor Ref t)

output: The same tensor as `data`.

refSelect

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor v'1 Int32

index: A scalar that determines the input that gets selected.

-> [Tensor Ref t]

inputs: A list of ref tensors, one of which will be forwarded to output.

-> m' (Tensor Ref t)

output: The forwarded tensor.

Forwards the indexth element of inputs to output.

refSelect'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor v'1 Int32

index: A scalar that determines the input that gets selected.

-> [Tensor Ref t]

inputs: A list of ref tensors, one of which will be forwarded to output.

-> m' (Tensor Ref t)

output: The forwarded tensor.

refSwitch

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref t

data: The ref tensor to be forwarded to the appropriate output.

-> Tensor v'2 Bool

pred: A scalar that specifies which output port will receive data.

-> m' (Tensor Ref t, Tensor Ref t)

(output_false, output_true)

  • output_false: If pred is false, data will be forwarded to this output.
  • output_true: If pred is true, data will be forwarded to this output.

Forwards the ref tensor `data` to the output port determined by pred.

If pred is true, the `data` input is forwarded to output_true. Otherwise, the data goes to output_false.

See also Switch and Merge.

refSwitch'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref t

data: The ref tensor to be forwarded to the appropriate output.

-> Tensor v'2 Bool

pred: A scalar that specifies which output port will receive data.

-> m' (Tensor Ref t, Tensor Ref t)

(output_false, output_true)

  • output_false: If pred is false, data will be forwarded to this output.
  • output_true: If pred is true, data will be forwarded to this output.

relu

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

features

-> Tensor Build t

activations

Computes rectified linear: `max(features, 0)`.

relu'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

features

-> Tensor Build t

activations

relu6

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

features

-> Tensor Build t

activations

Computes rectified linear 6: `min(max(features, 0), 6)`.

relu6'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

features

-> Tensor Build t

activations

relu6Grad

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding Relu6 operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding Relu6 operation.

-> Tensor Build t

backprops: The gradients: `gradients * (features > 0) * (features < 6)`.

Computes rectified linear 6 gradients for a Relu6 operation.
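A scalar Haskell sketch of that formula (illustrative only):

```haskell
-- Gradient passes through only where the input was in the open interval (0, 6).
relu6GradScalar :: (Num a, Ord a) => a -> a -> a
relu6GradScalar gradient feature
    | feature > 0 && feature < 6 = gradient
    | otherwise                  = 0
```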

relu6Grad'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding Relu6 operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding Relu6 operation.

-> Tensor Build t

backprops: The gradients: `gradients * (features > 0) * (features < 6)`.

reluGrad

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding Relu operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently).

-> Tensor Build t

backprops: `gradients * (features > 0)`.

Computes rectified linear gradients for a Relu operation.

reluGrad'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding Relu operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently).

-> Tensor Build t

backprops: `gradients * (features > 0)`.

requantizationRange

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` tinput 
=> Tensor v'1 tinput

input

-> Tensor v'2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v'3 Float

input_max: The float value that the maximum quantized input value represents.

-> (Tensor Build Float, Tensor Build Float)

(output_min, output_max)

  • output_min: The computed min output.
  • output_max: The computed max output.

Given a quantized tensor described by (input, input_min, input_max), outputs a range that covers the actual values present in that tensor. This op is typically used to produce the requested_output_min and requested_output_max for Requantize.

requantizationRange'

Arguments

:: OneOf `[Int16, Int32, Word16, Word8]` tinput 
=> OpParams 
-> Tensor v'1 tinput

input

-> Tensor v'2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v'3 Float

input_max: The float value that the maximum quantized input value represents.

-> (Tensor Build Float, Tensor Build Float)

(output_min, output_max)

  • output_min: The computed min output.
  • output_max: The computed max output.

requantize

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v'1 tinput

input

-> Tensor v'2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v'3 Float

input_max: The float value that the maximum quantized input value represents.

-> Tensor v'4 Float

requested_output_min: The float value that the minimum quantized output value represents.

-> Tensor v'5 Float

requested_output_max: The float value that the maximum quantized output value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output
  • output_min: The requested_output_min value is copied into this output.
  • output_max: The requested_output_max value is copied into this output.

Convert the quantized input tensor into a lower-precision output, using the output range specified with requested_output_min and requested_output_max.

input_min and input_max are scalar floats that specify the range for the float interpretation of the input data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
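A minimal Haskell sketch of that interpretation for quint16 data (illustrative; the 16-bit range is hard-coded):

```haskell
import Data.Word (Word16)

-- dequantize (-1) 1 0     == -1.0
-- dequantize (-1) 1 65535 ==  1.0
dequantize :: Float -> Float -> Word16 -> Float
dequantize inMin inMax q = inMin + (inMax - inMin) * fromIntegral q / 65535
```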

requantize'

Arguments

:: (OneOf `[Int16, Int32, Word16, Word8]` tinput, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> OpParams 
-> Tensor v'1 tinput

input

-> Tensor v'2 Float

input_min: The float value that the minimum quantized input value represents.

-> Tensor v'3 Float

input_max: The float value that the maximum quantized input value represents.

-> Tensor v'4 Float

requested_output_min: The float value that the minimum quantized output value represents.

-> Tensor v'5 Float

requested_output_max: The float value that the maximum quantized output value represents.

-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float)

(output, output_min, output_max)

  • output
  • output_min: The requested_output_min value is copied into this output.
  • output_max: The requested_output_max value is copied into this output.

reshape

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tshape) 
=> Tensor v'1 t

tensor

-> Tensor v'2 tshape

shape: Defines the shape of the output tensor.

-> Tensor Build t

output

Reshapes a tensor.

Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.

If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of `[-1]` flattens into 1-D. At most one component of shape can be -1.

If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements implied by shape must be the same as the number of elements in tensor.

For example:

```prettyprint
# tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor t has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor t is [[[1, 1], [2, 2]],
#              [[3, 3], [4, 4]]]
# tensor t has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor t is [[[1, 1, 1],
#               [2, 2, 2]],
#              [[3, 3, 3],
#               [4, 4, 4]],
#              [[5, 5, 5],
#               [6, 6, 6]]]
# tensor t has shape [3, 2, 3]
# pass '[-1]' to flatten t
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape

# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
                              [2, 2, 2],
                              [3, 3, 3]],
                             [[4, 4, 4],
                              [5, 5, 5],
                              [6, 6, 6]]]

# tensor t is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
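The `-1` inference rule can be written as a small Haskell function (an illustrative sketch; it assumes at most one `-1` appears in the requested shape):

```haskell
-- Nothing when the element counts cannot be reconciled.
inferShape :: Int -> [Int] -> Maybe [Int]
inferShape total dims
    | (-1) `notElem` dims =
        if product dims == total then Just dims else Nothing
    | known > 0 && total `mod` known == 0 =
        Just (map (\d -> if d == -1 then total `div` known else d) dims)
    | otherwise = Nothing
  where
    known = product (filter (/= -1) dims)

-- inferShape 18 [2, -1, 3] == Just [2, 3, 3]
```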

reshape'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tshape) 
=> OpParams 
-> Tensor v'1 t

tensor

-> Tensor v'2 tshape

shape: Defines the shape of the output tensor.

-> Tensor Build t

output

resizeArea

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

Resize images to size using area interpolation.

Input images can be of different types but output images are always float.

resizeArea'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

resizeBicubic

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

Resize images to size using bicubic interpolation.

Input images can be of different types but output images are always float.

resizeBicubic'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

resizeBilinear

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

Resize images to size using bilinear interpolation.

Input images can be of different types but output images are always float.

resizeBilinear'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

resizeBilinearGrad

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 Float

grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 t

original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`. The image tensor that was resized.

-> Tensor Build t

output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double.

Computes the gradient of bilinear interpolation.

resizeBilinearGrad'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Float

grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 t

original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`. The image tensor that was resized.

-> Tensor Build t

output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double.

resizeNearestNeighbor

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build t

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

Resize images to size using nearest neighbor interpolation.

resizeNearestNeighbor'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Build t

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

resizeNearestNeighborGrad

Arguments

:: OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The original input size.

-> Tensor Build t

output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image.

Computes the gradient of nearest neighbor interpolation.

resizeNearestNeighborGrad'

Arguments

:: OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v'2 Int32

size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The original input size.

-> Tensor Build t

output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image.

resourceApplyAdadelta

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> m' ControlNode 

Update '*var' according to the adadelta scheme.

accum = rho() * accum + (1 - rho()) * grad.square();
update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
update_accum = rho() * update_accum + (1 - rho()) * update.square();
var -= update;
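A per-element Haskell sketch of these rules (illustrative; it follows the equations as written, without the op's lr scaling):

```haskell
-- State is (var, accum, accumUpdate); returns the updated state.
adadeltaStep :: Double -> Double
             -> (Double, Double, Double) -> Double
             -> (Double, Double, Double)
adadeltaStep rho epsilon (var, accum, accumUpdate) grad =
    (var - update, accum', accumUpdate')
  where
    accum'       = rho * accum + (1 - rho) * grad * grad
    update       = sqrt (accumUpdate + epsilon) / sqrt (accum' + epsilon) * grad
    accumUpdate' = rho * accumUpdate + (1 - rho) * update * update
```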

resourceApplyAdadelta'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> m' ControlNode 

resourceApplyAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> m' ControlNode 

Update '*var' according to the adagrad scheme.

accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))
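A per-element Haskell sketch of this update (illustrative only):

```haskell
-- State is (var, accum); returns the updated state.
adagradStep :: Double -> (Double, Double) -> Double -> (Double, Double)
adagradStep lr (var, accum) grad =
    (var - lr * grad / sqrt accum', accum')
  where
    accum' = accum + grad * grad
```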

resourceApplyAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> m' ControlNode 

resourceApplyAdagradDA

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

gradient_accumulator: Should be from a Variable().

-> ResourceHandle

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 Int64

global_step: Training step number. Must be a scalar.

-> m' ControlNode 

Update '*var' according to the proximal adagrad scheme.

resourceApplyAdagradDA'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

gradient_accumulator: Should be from a Variable().

-> ResourceHandle

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 Int64

global_step: Training step number. Must be a scalar.

-> m' ControlNode 

resourceApplyAdam

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

m: Should be from a Variable().

-> ResourceHandle

v: Should be from a Variable().

-> Tensor v'4 t

beta1_power: Must be a scalar.

-> Tensor v'5 t

beta2_power: Must be a scalar.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

beta1: Momentum factor. Must be a scalar.

-> Tensor v'8 t

beta2: Momentum factor. Must be a scalar.

-> Tensor v'9 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'10 t

grad: The gradient.

-> m' ControlNode 

Update '*var' according to the Adam algorithm.

lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
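A per-element Haskell sketch of one Adam step following these equations (illustrative; t >= 1 is the current step number used for bias correction):

```haskell
-- State is (var, m, v); returns the updated state.
adamStep :: Double -> Double -> Double -> Double -> Int
         -> (Double, Double, Double) -> Double
         -> (Double, Double, Double)
adamStep lr beta1 beta2 epsilon t (var, m, v) g =
    (var - lrT * m' / (sqrt v' + epsilon), m', v')
  where
    lrT = lr * sqrt (1 - beta2 ^ t) / (1 - beta1 ^ t)
    m'  = beta1 * m + (1 - beta1) * g
    v'  = beta2 * v + (1 - beta2) * g * g
```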

resourceApplyAdam'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

m: Should be from a Variable().

-> ResourceHandle

v: Should be from a Variable().

-> Tensor v'4 t

beta1_power: Must be a scalar.

-> Tensor v'5 t

beta2_power: Must be a scalar.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

beta1: Momentum factor. Must be a scalar.

-> Tensor v'8 t

beta2: Momentum factor. Must be a scalar.

-> Tensor v'9 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'10 t

grad: The gradient.

-> m' ControlNode 

resourceApplyCenteredRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

mg: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> m' ControlNode 

Update '*var' according to the centered RMSProp algorithm.

The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.

Note that in the dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2
mean_grad = decay * mean_grad + (1-decay) * gradient

Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

mg <- rho * mg_{t-1} + (1-rho) * grad
ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
var <- var - mom

resourceApplyCenteredRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

mg: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> m' ControlNode 

resourceApplyFtrl

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 t

lr_power: Scaling factor. Must be a scalar.

-> m' ControlNode 

Update '*var' according to the Ftrl-proximal scheme.

accum_new = accum + grad * grad
linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
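A per-element Haskell sketch of these rules (illustrative only):

```haskell
-- State is (var, accum, linear); returns the updated state.
ftrlStep :: Double -> Double -> Double -> Double
         -> (Double, Double, Double) -> Double
         -> (Double, Double, Double)
ftrlStep lr l1 l2 lrPower (var, accum, linear) grad = (var', accum', linear')
  where
    accum'    = accum + grad * grad
    linear'   = linear + grad
              + (accum' ** (-lrPower) - accum ** (-lrPower)) / lr * var
    quadratic = 1 / (accum' ** lrPower * lr) + 2 * l2
    var' | abs linear' > l1 = (signum linear' * l1 - linear') / quadratic
         | otherwise        = 0
```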

resourceApplyFtrl'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'8 t

lr_power: Scaling factor. Must be a scalar.

-> m' ControlNode 

resourceApplyGradientDescent

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

delta: The change.

-> m' ControlNode 

Update '*var' by subtracting alpha * delta from it.

resourceApplyGradientDescent'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

delta: The change.

-> m' ControlNode 

resourceApplyMomentum

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

momentum: Momentum. Must be a scalar.

-> m' ControlNode 

Update '*var' according to the momentum scheme.

Set use_nesterov = True if you want to use Nesterov momentum.

accum = accum * momentum + grad
var -= lr * accum

resourceApplyMomentum'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 t

momentum: Momentum. Must be a scalar.

-> m' ControlNode 

resourceApplyProximalAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> m' ControlNode 

Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

accum += grad * grad
prox_v = var - lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
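A per-element Haskell sketch of this update; the max{...} term is the usual L1 soft-threshold (illustrative only):

```haskell
-- State is (var, accum); returns the updated state.
proxAdagradStep :: Double -> Double -> Double
                -> (Double, Double) -> Double -> (Double, Double)
proxAdagradStep lr l1 l2 (var, accum) grad = (var', accum')
  where
    accum' = accum + grad * grad
    proxV  = var - lr * grad / sqrt accum'
    var'   = signum proxV / (1 + lr * l2) * max (abs proxV - lr * l1) 0
```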

resourceApplyProximalAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> m' ControlNode 

resourceApplyProximalGradientDescent

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

delta: The change.

-> m' ControlNode 

Update '*var' using the FOBOS algorithm with a fixed learning rate.

prox_v = var - alpha * delta
var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

resourceApplyProximalGradientDescent'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

delta: The change.

-> m' ControlNode 

resourceApplyRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> m' ControlNode 

Update '*var' according to the RMSProp algorithm.

Note that in the dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
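A per-element Haskell sketch of these rules (illustrative only):

```haskell
-- State is (var, ms, mom); returns the updated state.
rmsPropStep :: Double -> Double -> Double -> Double
            -> (Double, Double, Double) -> Double
            -> (Double, Double, Double)
rmsPropStep lr rho momentum epsilon (var, ms, mom) grad =
    (var - mom', ms', mom')
  where
    ms'  = rho * ms + (1 - rho) * grad * grad
    mom' = momentum * mom + lr * grad / sqrt (ms' + epsilon)
```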

resourceApplyRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> m' ControlNode 

resourceGather

Arguments

:: (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

resource

-> Tensor v'2 tindices

indices

-> m' (Tensor Value dtype)

output

Gather slices from the variable pointed to by resource according to indices.

indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

```python
# Scalar indices
output[:, ..., :] = params[indices, :, ... :]

# Vector indices
output[i, :, ..., :] = params[indices[i], :, ... :]

# Higher rank indices
output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
```
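The vector-indices case can be sketched on plain Haskell lists, treating each element of params as one row (illustrative only):

```haskell
-- output !! i == params !! (indices !! i)
gatherRows :: [a] -> [Int] -> [a]
gatherRows params = map (params !!)
```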

resourceGather'

Arguments

:: (MonadBuild m', TensorType dtype, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

resource

-> Tensor v'2 tindices

indices

-> m' (Tensor Value dtype)

output

resourceScatterAdd

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

resource: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 dtype

updates: A tensor of updated values to add to ref.

-> m' ControlNode 

Adds sparse updates to the variable referenced by resource.

This operation computes

# Scalar indices
ref[indices, ...] += updates[...]

# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions add.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/ScatterAdd.png" alt + /div

resourceScatterAdd'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

resource: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 dtype

updates: A tensor of updated values to add to ref.

-> m' ControlNode 

resourceSparseApplyAdadelta

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Learning rate. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> Tensor v'8 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

var: Should be from a Variable().

resourceSparseApplyAdadelta'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Learning rate. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> Tensor v'8 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

resourceSparseApplyAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

That is, for rows we have grad for, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))

resourceSparseApplyAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

resourceSparseApplyAdagradDA

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

gradient_accumulator: Should be from a Variable().

-> ResourceHandle

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Learning rate. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 Int64

global_step: Training step number. Must be a scalar.

-> m' ControlNode 

Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

resourceSparseApplyAdagradDA'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

gradient_accumulator: Should be from a Variable().

-> ResourceHandle

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Learning rate. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 Int64

global_step: Training step number. Must be a scalar.

-> m' ControlNode 

resourceSparseApplyCenteredRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

mg: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum: Momentum. Must be a scalar.

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> Tensor v'10 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' ControlNode 

Update '*var' according to the centered RMSProp algorithm.

The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.

Note that in the dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.

```
mean_square = decay * mean_square + (1-decay) * gradient ** 2
mean_grad = decay * mean_grad + (1-decay) * gradient
Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
```

```
ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
```
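A minimal pure Haskell sketch of one row's centered update under these rules (helper name ours, not part of this module):

```haskell
-- One row's centered RMSProp step, following the mg/ms/mom rules above.
centeredRmsPropRow
  :: (Double, Double, Double, Double)  -- (lr, rho, momentum, epsilon)
  -> (Double, Double, Double, Double)  -- (var, mg, ms, mom)
  -> Double                            -- grad
  -> (Double, Double, Double, Double)
centeredRmsPropRow (lr, rho, momentum, epsilon) (var, mg, ms, mom) grad =
  let mg'  = rho * mg + (1 - rho) * grad
      ms'  = rho * ms + (1 - rho) * grad * grad
      mom' = momentum * mom + lr * grad / sqrt (ms' + epsilon - mg' * mg')
  in (var - mom', mg', ms', mom')
```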

resourceSparseApplyCenteredRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

mg: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum: Momentum. Must be a scalar.

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> Tensor v'10 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' ControlNode 

resourceSparseApplyFtrl

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 t

lr_power: Scaling factor. Must be a scalar.

-> m' ControlNode 

Update relevant entries in '*var' according to the Ftrl-proximal scheme.

That is, for rows we have grad for, we update var, accum and linear as follows:

```
accum_new = accum + grad * grad
linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
```
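A minimal pure Haskell sketch of the per-row Ftrl-proximal rule above (helper name ours, not part of this module):

```haskell
-- One row's Ftrl-proximal step, mirroring the update quoted above.
ftrlRow
  :: (Double, Double, Double, Double)  -- (lr, l1, l2, lr_power)
  -> (Double, Double, Double)          -- (var, accum, linear)
  -> Double                            -- grad
  -> (Double, Double, Double)
ftrlRow (lr, l1, l2, lrPower) (var, accum, linear) grad =
  let accumNew  = accum + grad * grad
      linear'   = linear + grad
                    + (accumNew ** (-lrPower) - accum ** (-lrPower)) / lr * var
      quadratic = 1.0 / (accumNew ** lrPower * lr) + 2 * l2
      var' = if abs linear' > l1
               then (signum linear' * l1 - linear') / quadratic
               else 0.0
  in (var', accumNew, linear')
```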

resourceSparseApplyFtrl'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> ResourceHandle

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 t

lr_power: Scaling factor. Must be a scalar.

-> m' ControlNode 

resourceSparseApplyMomentum

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

momentum: Momentum. Must be a scalar.

-> m' ControlNode 

Update relevant entries in '*var' and '*accum' according to the momentum scheme.

Set use_nesterov = True if you want to use Nesterov momentum.

That is, for rows we have grad for, we update var and accum as follows:

```
accum = accum * momentum + grad
var -= lr * accum
```
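A minimal pure Haskell sketch of one row's momentum step (helper name ours):

```haskell
-- accum = accum * momentum + grad; var -= lr * accum
momentumRow :: Double -> Double -> (Double, Double) -> Double -> (Double, Double)
momentumRow lr momentum (var, accum) grad =
  let accum' = accum * momentum + grad
  in (var - lr * accum', accum')
```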

resourceSparseApplyMomentum'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

momentum: Momentum. Must be a scalar.

-> m' ControlNode 

resourceSparseApplyProximalAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> Tensor v'7 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

That is, for rows we have grad for, we update var and accum as follows:

```
accum += grad * grad
prox_v = var
prox_v -= lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
```
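A minimal pure Haskell sketch of that per-row FOBOS step (helper name ours, not part of this module):

```haskell
-- One row's proximal Adagrad step, mirroring the rule quoted above.
proximalAdagradRow :: (Double, Double, Double) -> (Double, Double) -> Double -> (Double, Double)
proximalAdagradRow (lr, l1, l2) (var, accum) grad =
  let accum' = accum + grad * grad
      proxV  = var - lr * grad * (1 / sqrt accum')
      var'   = signum proxV / (1 + lr * l2) * max (abs proxV - lr * l1) 0
  in (var', accum')
```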

resourceSparseApplyProximalAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> Tensor v'7 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

resourceSparseApplyProximalGradientDescent

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

grad: The gradient.

-> Tensor v'6 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

Sparse update '*var' as FOBOS algorithm with fixed learning rate.

That is, for rows we have grad for, we update var as follows:

```
prox_v = var - alpha * grad
var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
```
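A minimal pure Haskell sketch of that per-row rule (helper name ours):

```haskell
-- Fixed-rate FOBOS: prox_v = var - alpha * grad, then soft-threshold.
proximalSgdRow :: (Double, Double, Double) -> Double -> Double -> Double
proximalSgdRow (alpha, l1, l2) var grad =
  let proxV = var - alpha * grad
  in signum proxV / (1 + alpha * l2) * max (abs proxV - alpha * l1) 0
```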

resourceSparseApplyProximalGradientDescent'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

grad: The gradient.

-> Tensor v'6 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' ControlNode 

resourceSparseApplyRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum: Momentum. Must be a scalar.

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> Tensor v'9 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' ControlNode 

Update '*var' according to the RMSProp algorithm.

Note that in the dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

```
mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
```

```
ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
```
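A minimal pure Haskell sketch of one row's update under these rules (helper name ours):

```haskell
-- One row's RMSProp step: ms, then mom, then var.
rmsPropRow
  :: (Double, Double, Double, Double)  -- (lr, rho, momentum, epsilon)
  -> (Double, Double, Double)          -- (var, ms, mom)
  -> Double                            -- grad
  -> (Double, Double, Double)
rmsPropRow (lr, rho, momentum, epsilon) (var, ms, mom) grad =
  let ms'  = rho * ms + (1 - rho) * grad * grad
      mom' = momentum * mom + lr * grad / sqrt (ms' + epsilon)
  in (var - mom', ms', mom')
```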

resourceSparseApplyRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> ResourceHandle

var: Should be from a Variable().

-> ResourceHandle

ms: Should be from a Variable().

-> ResourceHandle

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum: Momentum. Must be a scalar.

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> Tensor v'9 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' ControlNode 

restore

Arguments

:: TensorType dt 
=> Tensor v'1 ByteString

file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

-> Tensor v'2 ByteString

tensor_name: Must have a single element. The name of the tensor to be restored.

-> Tensor Build dt

tensor: The restored tensor.

Restores a tensor from checkpoint files.

Reads a tensor stored in one or several files. If there are several files (for instance because a tensor was saved as slices), file_pattern may contain wildcard symbols (* and ?) in the filename portion only, not in the directory portion.

If a file_pattern matches several files, preferred_shard can be used to hint in which file the requested tensor is likely to be found. This op will first open the file at index preferred_shard in the list of matching files and try to restore tensors from that file. Only if some tensors or tensor slices are not found in that first file does the Op open all the files. Setting preferred_shard to match the value passed as the shard input of a matching Save Op may speed up Restore. This attribute only affects performance, not correctness. The default value -1 means files are processed in order.

See also RestoreSlice.
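A minimal usage sketch in this binding, assuming the scalar constant helper from TensorFlow.Ops and the session plumbing from TensorFlow.Core; the checkpoint pattern and tensor name are made up for the example:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

-- Read back a Float tensor stored under the (hypothetical) name "weights"
-- from checkpoint files matching "ckpt-*".
readWeights :: IO (V.Vector Float)
readWeights = TF.runSession $
    TF.run $ CoreOps.restore (Ops.scalar "ckpt-*") (Ops.scalar "weights")
```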

restore'

Arguments

:: TensorType dt 
=> OpParams 
-> Tensor v'1 ByteString

file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

-> Tensor v'2 ByteString

tensor_name: Must have a single element. The name of the tensor to be restored.

-> Tensor Build dt

tensor: The restored tensor.

restoreSlice

Arguments

:: TensorType dt 
=> Tensor v'1 ByteString

file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

-> Tensor v'2 ByteString

tensor_name: Must have a single element. The name of the tensor to be restored.

-> Tensor v'3 ByteString

shape_and_slice: Scalar. The shapes and slice specifications to use when restoring a tensor.

-> Tensor Build dt

tensor: The restored tensor.

Restores a tensor from checkpoint files.

This is like Restore except that the restored tensor can be listed as filling only a slice of a larger tensor. shape_and_slice specifies the shape of the larger tensor and the slice that the restored tensor covers.

The shape_and_slice input has the same format as the elements of the shapes_and_slices input of the SaveSlices op.

restoreSlice'

Arguments

:: TensorType dt 
=> OpParams 
-> Tensor v'1 ByteString

file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

-> Tensor v'2 ByteString

tensor_name: Must have a single element. The name of the tensor to be restored.

-> Tensor v'3 ByteString

shape_and_slice: Scalar. The shapes and slice specifications to use when restoring a tensor.

-> Tensor Build dt

tensor: The restored tensor.

restoreV2

Arguments

:: TensorTypes dtypes 
=> Tensor v'1 ByteString

prefix: Must have a single element. The prefix of a V2 checkpoint.

-> Tensor v'2 ByteString

tensor_names: shape {N}. The names of the tensors to be restored.

-> Tensor v'3 ByteString

shape_and_slices: shape {N}. The slice specs of the tensors to be restored. Empty strings indicate that they are non-partitioned tensors.

-> TensorList Build dtypes

tensors: shape {N}. The restored tensors, whose shapes are read from the checkpoint directly.

Restores tensors from a V2 checkpoint.

For backward compatibility with the V1 format, this Op currently allows restoring from a V1 checkpoint as well:

  • This Op first attempts to find the V2 index file pointed to by "prefix", and if found proceeds to read it as a V2 checkpoint;
  • Otherwise the V1 read path is invoked.

Relying on this behavior is not recommended, as the ability to fall back to read V1 might be deprecated and eventually removed.

By default, restores the named tensors in full. If the caller wishes to restore specific slices of stored tensors, "shape_and_slices" should be non-empty strings and correspondingly well-formed.

Callers must ensure all the named tensors are indeed stored in the checkpoint.

restoreV2'

Arguments

:: TensorTypes dtypes 
=> OpParams 
-> Tensor v'1 ByteString

prefix: Must have a single element. The prefix of a V2 checkpoint.

-> Tensor v'2 ByteString

tensor_names: shape {N}. The names of the tensors to be restored.

-> Tensor v'3 ByteString

shape_and_slices: shape {N}. The slice specs of the tensors to be restored. Empty strings indicate that they are non-partitioned tensors.

-> TensorList Build dtypes

tensors: shape {N}. The restored tensors, whose shapes are read from the checkpoint directly.

reverse

Arguments

:: OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

tensor: Up to 8-D.

-> Tensor v'2 Bool

dims: 1-D. The dimensions to reverse.

-> Tensor Build t

output: The same shape as tensor.

Reverses specific dimensions of a tensor.

Given a tensor, and a bool tensor dims representing the dimensions of tensor, this operation reverses each dimension i of tensor where `dims[i]` is True.

tensor can have up to 8 dimensions. The number of dimensions of tensor must equal the number of elements in dims. In other words:

`rank(tensor) = size(dims)`

For example:

```prettyprint
# tensor t is [[[[ 0,  1,  2,  3],
#                [ 4,  5,  6,  7],
#                [ 8,  9, 10, 11]],
#               [[12, 13, 14, 15],
#                [16, 17, 18, 19],
#                [20, 21, 22, 23]]]]
# tensor t shape is [1, 2, 3, 4]

# dims is [False, False, False, True]
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# dims is [False, True, False, False]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]

# dims is [False, False, True, False]
reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
                        [ 4,  5,  6,  7],
                        [ 0,  1,  2,  3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
```

reverse'

Arguments

:: OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

tensor: Up to 8-D.

-> Tensor v'2 Bool

dims: 1-D. The dimensions to reverse.

-> Tensor Build t

output: The same shape as tensor.

reverseSequence

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tlen) 
=> Int64

seq_dim: The dimension which is partially reversed.

-> Tensor v'1 t

input: The input to reverse.

-> Tensor v'2 tlen

seq_lengths: 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) < input.dims(seq_dim)`

-> Tensor Build t

output: The partially reversed input. It has the same shape as input.

Reverses variable length slices.

This op first slices input along the dimension batch_dim, and for each slice i, reverses the first `seq_lengths[i]` elements along the dimension seq_dim.

The elements of seq_lengths must obey `seq_lengths[i] < input.dims[seq_dim]`, and seq_lengths must be a vector of length `input.dims[batch_dim]`.

The output slice i along dimension batch_dim is then given by input slice i, with the first `seq_lengths[i]` slices along dimension seq_dim reversed.

For example:

```prettyprint
# Given this:
batch_dim = 0
seq_dim = 1
input.dims = (4, 8, ...)
seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]

# while entries past seq_lens are copied through:
output[0, 7:, :, ...] = input[0, 7:, :, ...]
output[1, 2:, :, ...] = input[1, 2:, :, ...]
output[2, 3:, :, ...] = input[2, 3:, :, ...]
output[3, 2:, :, ...] = input[3, 2:, :, ...]
```

In contrast, if:

```prettyprint
# Given this:
batch_dim = 2
seq_dim = 0
input.dims = (8, ?, 4, ...)
seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]

# while entries past seq_lens are copied through:
output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
```

reverseSequence'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tlen) 
=> OpParams 
-> Int64

seq_dim: The dimension which is partially reversed.

-> Tensor v'1 t

input: The input to reverse.

-> Tensor v'2 tlen

seq_lengths: 1-D with length `input.dims(batch_dim)` and `max(seq_lengths) < input.dims(seq_dim)`

-> Tensor Build t

output: The partially reversed input. It has the same shape as input.

reverseV2

Arguments

:: (OneOf `[Int32, Int64]` tidx, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v'1 t

tensor: Up to 8-D.

-> Tensor v'2 tidx

axis: 1-D. The indices of the dimensions to reverse.

-> Tensor Build t

output: The same shape as tensor.

Reverses specific dimensions of a tensor.

NOTE: `tf.reverse` has now changed behavior in preparation for 1.0. `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.

Given a tensor and an int32 tensor axis representing the set of dimensions of tensor to reverse, this operation reverses each dimension i for which there exists j s.t. `axis[j] == i`.

tensor can have up to 8 dimensions. The number of dimensions specified in axis may be 0 or more. If an index is specified more than once, an InvalidArgument error is raised.

For example:

```prettyprint
# tensor t is [[[[ 0,  1,  2,  3],
#                [ 4,  5,  6,  7],
#                [ 8,  9, 10, 11]],
#               [[12, 13, 14, 15],
#                [16, 17, 18, 19],
#                [20, 21, 22, 23]]]]
# tensor t shape is [1, 2, 3, 4]

# dims is [3] or dims is -1
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# dims is '[1]' (or dims is '[-3]')
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]

# dims is '[2]' (or dims is '[-2]')
reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
                        [ 4,  5,  6,  7],
                        [ 0,  1,  2,  3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
```

reverseV2'

Arguments

:: (OneOf `[Int32, Int64]` tidx, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> OpParams 
-> Tensor v'1 t

tensor: Up to 8-D.

-> Tensor v'2 tidx

axis: 1-D. The indices of the dimensions to reverse.

-> Tensor Build t

output: The same shape as tensor.

rint

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Returns element-wise integer closest to x.

If the result is midway between two representable values, the even representable value is chosen. For example:

```
rint(-1.5) ==> -2.0
rint(0.5000001) ==> 1.0
rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
```

rint'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

round

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Rounds the values of a tensor to the nearest integer, element-wise.

Rounds half to even. Also known as banker's rounding. If you want to round according to the current system rounding mode, use std::rint.
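Haskell's Prelude round follows the same half-to-even convention, so the behavior can be sketched directly:

```haskell
-- Prelude round also rounds halves to the nearest even integer.
halvesToEven :: [Integer]
halvesToEven = map round ([-1.5, -0.5, 0.5, 1.5, 2.5] :: [Double])
-- evaluates to [-2, 0, 0, 2, 2]
```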

rsqrt

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes reciprocal of square root of x element-wise.

I.e., \(y = 1 / \sqrt{x}\).

rsqrt'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

rsqrtGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Computes the gradient for the rsqrt of x wrt its input.

Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and dy is the corresponding input gradient.
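A one-line pure Haskell sketch of that rule for scalars (helper name ours):

```haskell
-- grad = dy * (-0.5) * y^3, where y = rsqrt(x).
rsqrtGradScalar :: Double -> Double -> Double
rsqrtGradScalar x dy = dy * (-0.5) * y ^ (3 :: Int)
  where y = 1 / sqrt x
```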

rsqrtGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

sampleDistortedBoundingBox

Arguments

:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) 
=> Tensor v'1 t

image_size: 1-D, containing `[height, width, channels]`.

-> Tensor v'2 Float

bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image.

-> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

(begin, size, bboxes)

  • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
  • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
  • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`.

Generate a single randomly distorted bounding box for an image.

Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an image_size, bounding_boxes and a series of constraints.

The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: begin, size and bboxes. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like.

Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.

For example,

```python
# Generate a single distorted bounding box.
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
    tf.shape(image),
    bounding_boxes=bounding_boxes)

# Draw the bounding box in an image summary.
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                              bbox_for_draw)
tf.image_summary('images_with_box', image_with_box)

# Employ the bounding box to distort the image.
distorted_image = tf.slice(image, begin, size)
```

Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If use_image_if_no_bounding_boxes is false and no bounding boxes are supplied, an error is raised.

sampleDistortedBoundingBox'

Arguments

:: (MonadBuild m', OneOf `[Int16, Int32, Int64, Int8, Word8]` t) 
=> OpParams 
-> Tensor v'1 t

image_size: 1-D, containing `[height, width, channels]`.

-> Tensor v'2 Float

bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image.

-> m' (Tensor Value t, Tensor Value t, Tensor Value Float)

(begin, size, bboxes)

  • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
  • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
  • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`.

save

Arguments

:: (MonadBuild m', TensorTypes t) 
=> Tensor v'1 ByteString

filename: Must have a single element. The name of the file to which we write the tensor.

-> Tensor v'2 ByteString

tensor_names: Shape `[N]`. The names of the tensors to be saved.

-> TensorList v'3 t

data: N tensors to save.

-> m' ControlNode 

Saves the input tensors to disk.

The size of tensor_names must match the number of tensors in `data`. `data[i]` is written to filename with name `tensor_names[i]`.

See also SaveSlices.

save'

Arguments

:: (MonadBuild m', TensorTypes t) 
=> OpParams 
-> Tensor v'1 ByteString

filename: Must have a single element. The name of the file to which we write the tensor.

-> Tensor v'2 ByteString

tensor_names: Shape `[N]`. The names of the tensors to be saved.

-> TensorList v'3 t

data: N tensors to save.

-> m' ControlNode 

saveSlices

Arguments

:: (MonadBuild m', TensorTypes t) 
=> Tensor v'1 ByteString

filename: Must have a single element. The name of the file to which we write the tensor.

-> Tensor v'2 ByteString

tensor_names: Shape `[N]`. The names of the tensors to be saved.

-> Tensor v'3 ByteString

shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when saving the tensors.

-> TensorList v'4 t

data: N tensors to save.

-> m' ControlNode 

Saves input tensor slices to disk.

This is like Save except that tensors can be listed in the saved file as being a slice of a larger tensor. shapes_and_slices specifies the shape of the larger tensor and the slice that this tensor covers. shapes_and_slices must have as many elements as tensor_names.

Elements of the shapes_and_slices input must either be:

  • The empty string, in which case the corresponding tensor is saved normally.
  • A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the dimI are the dimensions of the larger tensor and `slice-spec` specifies what part is covered by the tensor to save.

`slice-spec` itself is a :-separated list: `slice0:slice1:...:sliceN-1` where each sliceI is either:

  • The string - meaning that the slice covers all indices of this dimension
  • `start,length` where start and length are integers. In that case the slice covers length indices starting at start.

See also Save.
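For instance, a hypothetical spec following the format above, covering the first two rows (start 0, length 2) and all ten columns of a 4x10 tensor:

```
4 10 0,2:-
```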

saveSlices'

Arguments

:: (MonadBuild m', TensorTypes t) 
=> OpParams 
-> Tensor v'1 ByteString

filename: Must have a single element. The name of the file to which we write the tensor.

-> Tensor v'2 ByteString

tensor_names: Shape `[N]`. The names of the tensors to be saved.

-> Tensor v'3 ByteString

shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when saving the tensors.

-> TensorList v'4 t

data: N tensors to save.

-> m' ControlNode 

saveV2

Arguments

:: (MonadBuild m', TensorTypes dtypes) 
=> Tensor v'1 ByteString

prefix: Must have a single element. The prefix of the V2 checkpoint to which we write the tensors.

-> Tensor v'2 ByteString

tensor_names: shape {N}. The names of the tensors to be saved.

-> Tensor v'3 ByteString

shape_and_slices: shape {N}. The slice specs of the tensors to be saved. Empty strings indicate that they are non-partitioned tensors.

-> TensorList v'4 dtypes

tensors: N tensors to save.

-> m' ControlNode 

Saves tensors in V2 checkpoint format.

By default, saves the named tensors in full. If the caller wishes to save specific slices of full tensors, "shape_and_slices" should be non-empty strings and correspondingly well-formed.

saveV2'

Arguments

:: (MonadBuild m', TensorTypes dtypes) 
=> OpParams 
-> Tensor v'1 ByteString

prefix: Must have a single element. The prefix of the V2 checkpoint to which we write the tensors.

-> Tensor v'2 ByteString

tensor_names: shape {N}. The names of the tensors to be saved.

-> Tensor v'3 ByteString

shape_and_slices: shape {N}. The slice specs of the tensors to be saved. Empty strings indicate that they are non-partitioned tensors.

-> TensorList v'4 dtypes

tensors: N tensors to save.

-> m' ControlNode 

scalarSummary

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 ByteString

tags: Tags for the summary.

-> Tensor v'2 t

values: Same shape as tags. Values for the summary.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with scalar values.

The input tags and values must have the same shape. The generated summary has a summary value for each tag-value pair in tags and values.

scalarSummary'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 ByteString

tags: Tags for the summary.

-> Tensor v'2 t

values: Same shape as tags. Values for the summary.

-> Tensor Build ByteString

summary: Scalar. Serialized Summary protocol buffer.

scatterAdd

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to add to ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Adds sparse updates to a variable reference.

This operation computes

```
# Scalar indices
ref[indices, ...] += updates[...]

# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
```

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions add.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

(Figure: ../../images/ScatterAdd.png)
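A minimal pure Haskell sketch of the duplicate-handling semantics, using Data.Map in place of a variable (names ours):

```haskell
import qualified Data.Map.Strict as Map

-- Duplicate indices accumulate: both updates aimed at index 1 are summed
-- before being added to the stored value.
scatterAddRef :: Map.Map Int Double -> [(Int, Double)] -> Map.Map Int Double
scatterAddRef ref updates = Map.unionWith (+) ref (Map.fromListWith (+) updates)

-- scatterAddRef (Map.fromList [(0, 0), (1, 10)]) [(1, 2), (1, 3)]
--   == Map.fromList [(0, 0.0), (1, 15.0)]
```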

scatterAdd'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to add to ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

scatterDiv

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of values that ref is divided by.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Divides a variable reference by sparse updates.

This operation computes

```
# Scalar indices
ref[indices, ...] /= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] /= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
```

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions divide.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

scatterDiv'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of values that ref is divided by.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

scatterMul

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to multiply to ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Multiplies sparse updates into a variable reference.

This operation computes

```
# Scalar indices
ref[indices, ...] *= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] *= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
```

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions multiply.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

scatterMul'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to multiply to ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

scatterNd

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'2 t

updates: A Tensor. Must have the same type as tensor. A tensor of updated values to store in ref.

-> Tensor v'3 tindices

shape: A vector. The shape of the resulting tensor.

-> Tensor Build t

output: A new tensor with the given shape and updates applied according to the indices.

Creates a new tensor by applying sparse updates to individual values or slices within a zero tensor of the given shape tensor according to indices. This operator is the inverse of the tf.gather_nd operator which extracts values or slices from a given tensor.

TODO(simister): Add a link to Variable.getitem documentation on slice syntax.

shape is a TensorShape with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into shape. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of shape.

updates is Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
```

The simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.

(Figure: ../../images/ScatterNd1.png)

In Python, this scatter operation would look like this:

```
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
shape = tf.constant([8])
scatter = tf.scatter_nd(indices, updates, shape)
with tf.Session() as sess:
    print sess.run(scatter)
```

The resulting tensor would look like this:

[0, 11, 0, 10, 9, 0, 0, 12]

We can also insert entire slices of a higher rank tensor all at once. For example, say we want to insert two slices in the first dimension of a rank-3 tensor with two matrices of new values.

(Figure: ../../images/ScatterNd2.png)

In Python, this scatter operation would look like this:

```
indices = tf.constant([[0], [2]])
updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                        [7, 7, 7, 7], [8, 8, 8, 8]],
                       [[5, 5, 5, 5], [6, 6, 6, 6],
                        [7, 7, 7, 7], [8, 8, 8, 8]]])
shape = tf.constant([4, 4, 4])
scatter = tf.scatter_nd(indices, updates, shape)
with tf.Session() as sess:
    print sess.run(scatter)
```

The resulting tensor would look like this:

[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
 [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]

scatterNd'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'2 t

updates: A Tensor. Must have the same type as tensor. A tensor of updated values to store in ref.

-> Tensor v'3 tindices

shape: A vector. The shape of the resulting tensor.

-> Tensor Build t

output: A new tensor with the given shape and updates applied according to the indices.

scatterNdAdd

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v'2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.

-> m' (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Applies sparse addition between updates and individual values or slices within a given variable according to indices.

ref is a Tensor with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of ref.

updates is Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```

For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this:

```
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.scatter_nd_add(ref, indices, updates)
with tf.Session() as sess:
    print sess.run(add)
```

The resulting update to ref would look like this:

[1, 13, 3, 14, 14, 6, 7, 20]

See tf.scatter_nd for more details about how to make updates to slices.

scatterNdAdd'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v'2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.

-> m' (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

scatterNdSub

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v'2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref.

-> m' (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Applies sparse subtraction between updates and individual values or slices within a given variable according to indices.

ref is a Tensor with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of ref.

updates is Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```

For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:

```
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
sub = tf.scatter_nd_sub(ref, indices, updates)
with tf.Session() as sess:
    print sess.run(sub)
```

The resulting update to ref would look like this:

[1, -9, 3, -6, -4, 6, 7, -4]

See tf.scatter_nd for more details about how to make updates to slices.

scatterNdSub'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v'2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref.

-> m' (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

scatterNdUpdate

Arguments

:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v'2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.

-> m' (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Applies sparse updates to individual values or slices within a given variable according to indices.

ref is a Tensor with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of ref.

updates is Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```

For example, say we want to update 4 scattered elements in a rank-1 tensor with 8 elements. In Python, that update would look like this:

```
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
    print sess.run(update)
```

The resulting update to ref would look like this:

[1, 11, 3, 10, 9, 6, 7, 12]

See tf.scatter_nd for more details about how to make updates to slices.

scatterNdUpdate'

Arguments

:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v'2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v'3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.

-> m' (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

scatterSub

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to subtract from ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Subtracts sparse updates from a variable reference.

```
# Scalar indices
ref[indices, ...] -= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] -= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
```

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their (negated) contributions add.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

(Figure: ../../images/ScatterSub.png)

scatterSub'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to subtract from ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

scatterUpdate

Arguments

:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to store in ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Applies sparse updates to a variable reference.

This operation computes

```
# Scalar indices
ref[indices, ...] = updates[...]

# Vector indices (for each i)
ref[indices[i], ...] = updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
```

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

If values in ref are to be updated more than once, because there are duplicate entries in indices, the order at which the updates happen for each value is undefined.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

(Figure: ../../images/ScatterUpdate.png)

scatterUpdate'

Arguments

:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v'2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v'3 t

updates: A tensor of updated values to store in ref.

-> m' (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

sdcaFprint

Arguments

:: Tensor v'1 ByteString

input: vector of strings to compute fingerprints on.

-> Tensor Build Int64

output: a (N,2) shaped matrix where N is the number of elements in the input vector. Each row contains the low and high parts of the fingerprint.

Computes fingerprints of the input strings.

sdcaFprint'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

input: vector of strings to compute fingerprints on.

-> Tensor Build Int64

output: a (N,2) shaped matrix where N is the number of elements in the input vector. Each row contains the low and high parts of the fingerprint.

sdcaOptimizer

Arguments

:: Float

l1: Symmetric l1 regularization strength.

-> Float

l2: Symmetric l2 regularization strength.

-> Int64

num_inner_iterations: Number of iterations per mini-batch.

-> Int64

num_loss_partitions: Number of partitions of the global loss function.

-> [Tensor v'1 Int64]

sparse_example_indices: a list of vectors which contain example indices.

-> [Tensor v'2 Int64]

sparse_feature_indices: a list of vectors which contain feature indices.

-> [Tensor v'3 Float]

sparse_feature_values: a list of vectors which contain the feature values associated with each feature group.

-> [Tensor v'4 Float]

dense_features: a list of matrices which contain the dense feature values.

-> Tensor v'5 Float

example_weights: a vector which contains the weight associated with each example.

-> Tensor v'6 Float

example_labels: a vector which contains the label/target associated with each example.

-> [Tensor v'7 Int64]

sparse_indices: a list of vectors where each value is the indices which have corresponding weights in sparse_weights. This field may be omitted for the dense approach.

-> [Tensor v'8 Float]

sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group.

-> [Tensor v'9 Float]

dense_weights: a list of vectors where the values are the weights associated with a dense feature group.

-> Tensor v'10 Float

example_state_data: a list of vectors containing the example state data.

-> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])

(out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)

  • out_example_state_data: a list of vectors containing the updated example state data.
  • out_delta_sparse_weights: a list of vectors where each value is the delta weights associated with a sparse feature group.
  • out_delta_dense_weights: a list of vectors where the values are the delta weights associated with a dense feature group.

Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for linear models with L1 + L2 regularization. As the global optimization objective is strongly convex, the optimizer optimizes the dual objective at each step. The optimizer applies each update one example at a time. Examples are sampled uniformly, and the optimizer is learning-rate free and enjoys a linear convergence rate.

Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; Zhang, Tong. 2012. arXiv:1211.2717: http://arxiv.org/pdf/1211.2717v1.pdf

Loss objective = sum_i f_i(w x_i) + (l2 / 2) * |w|^2 + l1 * |w|

Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, Martin Takac. http://arxiv.org/abs/1502.03508

Stochastic Dual Coordinate Ascent with Adaptive Probabilities. Dominik Csiba, Zheng Qu, Peter Richtarik. https://arxiv.org/abs/1502.08053

sdcaOptimizer'

Arguments

:: OpParams 
-> Float

l1: Symmetric l1 regularization strength.

-> Float

l2: Symmetric l2 regularization strength.

-> Int64

num_inner_iterations: Number of iterations per mini-batch.

-> Int64

num_loss_partitions: Number of partitions of the global loss function.

-> [Tensor v'1 Int64]

sparse_example_indices: a list of vectors which contain example indices.

-> [Tensor v'2 Int64]

sparse_feature_indices: a list of vectors which contain feature indices.

-> [Tensor v'3 Float]

sparse_feature_values: a list of vectors which contain the feature values associated with each feature group.

-> [Tensor v'4 Float]

dense_features: a list of matrices which contain the dense feature values.

-> Tensor v'5 Float

example_weights: a vector which contains the weight associated with each example.

-> Tensor v'6 Float

example_labels: a vector which contains the label/target associated with each example.

-> [Tensor v'7 Int64]

sparse_indices: a list of vectors where each value is the indices which have corresponding weights in sparse_weights. This field may be omitted for the dense approach.

-> [Tensor v'8 Float]

sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group.

-> [Tensor v'9 Float]

dense_weights: a list of vectors where the values are the weights associated with a dense feature group.

-> Tensor v'10 Float

example_state_data: a list of vectors containing the example state data.

-> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float])

(out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)

  • out_example_state_data: a list of vectors containing the updated example state data.
  • out_delta_sparse_weights: a list of vectors where each value is the delta weights associated with a sparse feature group.
  • out_delta_dense_weights: a list of vectors where the values are the delta weights associated with a dense feature group.

sdcaShrinkL1

Arguments

:: MonadBuild m' 
=> Float

l1: Symmetric l1 regularization strength.

-> Float

l2: Symmetric l2 regularization strength. Should be a positive float.

-> [Tensor Ref Float]

weights: a list of vectors where each value is the weight associated with a feature group.

-> m' ControlNode 

Applies L1 regularization shrink step on the parameters.

sdcaShrinkL1'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Float

l1: Symmetric l1 regularization strength.

-> Float

l2: Symmetric l2 regularization strength. Should be a positive float.

-> [Tensor Ref Float]

weights: a list of vectors where each value is the weight associated with a + feature group.

-> m' ControlNode 

segmentMax

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the maximum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \max_j(data_j)\) where max is over j such that `segment_ids[j] == i`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/SegmentMax.png" alt + /div

segmentMax'

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

segmentMean

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the mean along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \frac{\sum_j data_j}{N}\) where mean is over j such that `segment_ids[j] == i` and N is the total number of values summed.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/SegmentMean.png" alt + /div

segmentMean'

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

segmentMin

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the minimum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \min_j(data_j)\) where min is over j such that `segment_ids[j] == i`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/SegmentMin.png" alt + /div

segmentMin'

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

segmentProd

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the product along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \prod_j data_j\) where the product is over j such that `segment_ids[j] == i`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/SegmentProd.png" alt + /div

segmentProd'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

segmentSum

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the sum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \sum_j data_j\) where sum is over j such that `segment_ids[j] == i`.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/SegmentSum.png" alt + /div

segmentSum'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A 1-D tensor whose size matches the size of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

select

Arguments

:: TensorType t 
=> Tensor v'1 Bool

condition

-> Tensor v'2 t

t: A Tensor which may have the same shape as condition. If condition is rank 1, t may have higher rank, but its first dimension must match the size of condition.

-> Tensor v'3 t

e: A Tensor with the same type and shape as t.

-> Tensor Build t

output: A Tensor with the same type and shape as t and e.

Selects elements from t or e, depending on condition.

The t and e tensors must all have the same shape, and the output will also have that shape.

The condition tensor must be a scalar if t and e are scalars. If t and e are vectors or higher rank, then condition must be either a scalar, a vector with size matching the first dimension of t, or must have the same shape as t.

The condition tensor acts as a mask that chooses, based on the value at each element, whether the corresponding element / row in the output should be taken from t (if true) or e (if false).

If condition is a vector and t and e are higher rank matrices, then it chooses which row (outer dimension) to copy from t and e. If condition has the same shape as t and e, then it chooses which element to copy from t and e.

For example:

```prettyprint
# condition tensor is [[True, False]
#                      [False, True]]
# t is [[1, 2],
#       [3, 4]]
# e is [[5, 6],
#       [7, 8]]
select(condition, t, e) ==> [[1, 6],
                             [7, 4]]

# condition tensor is [True, False]
# t is [[1, 2],
#       [3, 4]]
# e is [[5, 6],
#       [7, 8]]
select(condition, t, e) ==> [[1, 2],
                             [7, 8]]
```
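A hedged Haskell sketch of the rank-1 case (untested; TF.vector is the list helper from TensorFlow.Ops):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- Element-wise choice between t and e: ==> [1.0, 5.0, 3.0].
selectExample :: IO (V.Vector Float)
selectExample = TF.runSession $ do
    let cond = TF.vector [True, False, True]
        t    = TF.vector [1, 2, 3 :: Float]
        e    = TF.vector [4, 5, 6 :: Float]
    TF.run (Gen.select cond t e)
```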

select'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Bool

condition

-> Tensor v'2 t

t: A Tensor which may have the same shape as condition. If condition is rank 1, t may have higher rank, but its first dimension must match the size of condition.

-> Tensor v'3 t

e: A Tensor with the same type and shape as t.

-> Tensor Build t

output: A Tensor with the same type and shape as t and e.

selfAdjointEig

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[..., M+1, M]`.

Computes the Eigen Decomposition of a batch of square self-adjoint matrices.

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix SelfAdjointEig.

The result is a `[..., M+1, M]` matrix with `[..., 0, :]` containing the eigenvalues, and subsequent `[..., 1:, :]` containing the eigenvectors.

selfAdjointEig'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Shape is `[..., M, M]`.

-> Tensor Build t

output: Shape is `[..., M+1, M]`.

selfAdjointEigV2

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

input: Tensor input of shape `[N, N]`.

-> (Tensor Build t, Tensor Build t)

(e, v)

  • e: Eigenvalues. Shape is `[N]`.
  • v: Eigenvectors. Shape is `[N, N]`.

Computes the eigen decomposition of one or more square self-adjoint matrices.

Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in input such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.

```prettyprint
# a is a tensor.
# e is a tensor of eigenvalues.
# v is a tensor of eigenvectors.
e, v = self_adjoint_eig(a)
e = self_adjoint_eig(a, compute_v=False)
```
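As a usage sketch from the Haskell side (untested; fetching the pair relies on the tuple Fetchable instance, and TF.constant comes from TensorFlow.Ops):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- Eigen decomposition of diag(2, 5): e holds the eigenvalues 2 and 5,
-- v the corresponding eigenvectors (signs may vary).
eigExample :: IO (V.Vector Float, V.Vector Float)
eigExample = TF.runSession $ do
    let a = TF.constant (TF.Shape [2, 2]) [2, 0, 0, 5 :: Float]
    TF.run (Gen.selfAdjointEigV2 a)
```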

selfAdjointEigV2'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: Tensor input of shape `[N, N]`.

-> (Tensor Build t, Tensor Build t)

(e, v)

  • e: Eigenvalues. Shape is `[N]`.
  • v: Eigenvectors. Shape is `[N, N]`.

serializeManySparse

Arguments

:: TensorType t 
=> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the minibatch SparseTensor.

-> Tensor v'2 t

sparse_values: 1-D. The values of the minibatch SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the minibatch SparseTensor.

-> Tensor Build ByteString

serialized_sparse

Serialize an N-minibatch SparseTensor into an `[N, 3]` string Tensor.

The SparseTensor must have rank R greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the SparseTensor must be sorted in increasing order of this first dimension. The serialized SparseTensor objects going into each row of serialized_sparse will have rank `R-1`.

The minibatch size N is extracted from `sparse_shape[0]`.

serializeManySparse'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the minibatch SparseTensor.

-> Tensor v'2 t

sparse_values: 1-D. The values of the minibatch SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the minibatch SparseTensor.

-> Tensor Build ByteString

serialized_sparse

serializeSparse

Arguments

:: TensorType t 
=> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the SparseTensor.

-> Tensor v'2 t

sparse_values: 1-D. The values of the SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the SparseTensor.

-> Tensor Build ByteString

serialized_sparse

Serialize a SparseTensor into a string 3-vector (1-D Tensor) object.

serializeSparse'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int64

sparse_indices: 2-D. The indices of the SparseTensor.

-> Tensor v'2 t

sparse_values: 1-D. The values of the SparseTensor.

-> Tensor v'3 Int64

sparse_shape: 1-D. The shape of the SparseTensor.

-> Tensor Build ByteString

serialized_sparse

setSize

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> Tensor v'1 Int64

set_indices: 2D Tensor, indices of a SparseTensor.

-> Tensor v'2 t

set_values: 1D Tensor, values of a SparseTensor.

-> Tensor v'3 Int64

set_shape: 1D Tensor, shape of a SparseTensor.

-> Tensor Build Int32

size: For set ranked n, this is a Tensor with rank `n-1`, and the same 1st `n-1` dimensions as set. Each value is the number of unique elements in the corresponding `[0...n-1]` dimension of set.

Number of unique elements along last dimension of input set.

Input set is a SparseTensor represented by set_indices, set_values, and set_shape. The last dimension contains values in a set; duplicates are allowed but ignored.

If validate_indices is True, this op validates the order and range of set indices.
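A small end-to-end sketch (untested; the indices are given in the sorted order that validate_indices expects):

```haskell
import Data.Int (Int32, Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- Sparse sets of dense shape [2, 3]: row 0 holds {1, 2}, row 1 holds {3}.
-- Result is the per-row set size: ==> [2, 1].
setSizeExample :: IO (V.Vector Int32)
setSizeExample = TF.runSession $ do
    let idx  = TF.constant (TF.Shape [3, 2]) [0, 0, 0, 1, 1, 0 :: Int64]
        vals = TF.vector [1, 2, 3 :: Int32]
        shp  = TF.vector [2, 3 :: Int64]
    TF.run (Gen.setSize idx vals shp)
```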

setSize'

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 Int64

set_indices: 2D Tensor, indices of a SparseTensor.

-> Tensor v'2 t

set_values: 1D Tensor, values of a SparseTensor.

-> Tensor v'3 Int64

set_shape: 1D Tensor, shape of a SparseTensor.

-> Tensor Build Int32

size: For set ranked n, this is a Tensor with rank `n-1`, and the same 1st + `n-1` dimensions as set. Each value is the number of unique elements in + the corresponding `[0...n-1]` dimension of set.

shape

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_type) 
=> Tensor v'1 t

input

-> Tensor Build out_type

output

Returns the shape of a tensor.

This operation returns a 1-D integer tensor representing the shape of input.

For example:

```prettyprint
# t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
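The same computation through the Haskell bindings might look like this (untested sketch; constant is from TensorFlow.Ops, and the Int32 result type selects out_type):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- Shape of a [2, 3] constant ==> [2, 3].  (size below would instead give 6.)
shapeExample :: IO (V.Vector Int32)
shapeExample = TF.runSession $ do
    let t = TF.constant (TF.Shape [2, 3]) [1 .. 6 :: Float]
    TF.run (Gen.shape t)
```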

shape'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_type) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build out_type

output

shapeN

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_type) 
=> [Tensor v'1 t]

input

-> [Tensor Build out_type]

output

Returns shape of tensors.

This operation returns N 1-D integer tensors representing the shape of each `input[i]`.

shapeN'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_type) 
=> OpParams 
-> [Tensor v'1 t]

input

-> [Tensor Build out_type]

output

shardedFilename

Arguments

:: Tensor v'1 ByteString

basename

-> Tensor v'2 Int32

shard

-> Tensor v'3 Int32

num_shards

-> Tensor Build ByteString

filename

Generate a sharded filename. The filename is printf formatted as

%s-%05d-of-%05d, basename, shard, num_shards.
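For instance, basename "mydata" with shard 2 of 100 yields "mydata-00002-of-00100". The same format string in plain Haskell, for illustration only:

```haskell
import Text.Printf (printf)

-- Mirrors the op's printf format; not the op itself.
shardedName :: String
shardedName = printf "%s-%05d-of-%05d" "mydata" (2 :: Int) (100 :: Int)
-- ==> "mydata-00002-of-00100"
```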

shardedFilename'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

basename

-> Tensor v'2 Int32

shard

-> Tensor v'3 Int32

num_shards

-> Tensor Build ByteString

filename

shardedFilespec

Arguments

:: Tensor v'1 ByteString

basename

-> Tensor v'2 Int32

num_shards

-> Tensor Build ByteString

filename

Generate a glob pattern matching all sharded file names.

shardedFilespec'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

basename

-> Tensor v'2 Int32

num_shards

-> Tensor Build ByteString

filename

sigmoid

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes sigmoid of x element-wise.

Specifically, `y = 1 / (1 + exp(-x))`.

sigmoidGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Computes the gradient of the sigmoid of x wrt its input.

Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and dy is the corresponding input gradient.
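This is just the derivative of the sigmoid: since \(y = \frac{1}{1 + e^{-x}}\), differentiating gives \(\frac{dy}{dx} = \frac{e^{-x}}{(1 + e^{-x})^{2}} = y (1 - y)\), and the chain rule multiplies in dy.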

sigmoidGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

sign

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Returns an element-wise indication of the sign of a number.

`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

sin

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes sin of x element-wise.

sin'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

size

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_type) 
=> Tensor v'1 t

input

-> Tensor Build out_type

output

Returns the size of a tensor.

This operation returns an integer representing the number of elements in input.

For example:

```prettyprint
# t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```

size'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_type) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build out_type

output

skipgram

Arguments

:: MonadBuild m' 
=> Int64

batch_size: The size of produced batch.

-> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)

(vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)

  • vocab_word: A vector of words in the corpus.
  • vocab_freq: Frequencies of words. Sorted in the non-ascending order.
  • words_per_epoch: Number of words per epoch in the data file.
  • current_epoch: The current epoch number.
  • total_words_processed: The total number of words processed so far.
  • examples: A vector of word ids.
  • labels: A vector of word ids.

Parses a text file and creates a batch of examples.

skipgram'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Int64

batch_size: The size of produced batch.

-> m' (Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)

(vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels)

  • vocab_word: A vector of words in the corpus.
  • vocab_freq: Frequencies of words. Sorted in the non-ascending order.
  • words_per_epoch: Number of words per epoch in the data file.
  • current_epoch: The current epoch number.
  • total_words_processed: The total number of words processed so far.
  • examples: A vector of word ids.
  • labels: A vector of word ids.

slice

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` index) 
=> Tensor v'1 t

input

-> Tensor v'2 index

begin: begin[i] specifies the offset into the ith dimension of + input to slice from.

-> Tensor v'3 index

size: size[i] specifies the number of elements of the ith dimension + of input to slice. If size[i] is -1, all remaining elements in dimension + i are included in the slice (i.e. this is equivalent to setting + size[i] = input.dim_size(i) - begin[i]).

-> Tensor Build t

output

Return a slice from input.

The output tensor is a tensor with dimensions described by size whose values are extracted from input starting at the offsets in begin.

  • *Requirements*: `0 <= begin[i] <= begin[i] + size[i] <= Di` for i in `[0, n)`
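A hedged Haskell sketch of a 2-D slice (untested):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- From [[1,2,3],[4,5,6]], take both rows and columns 1..2: ==> [2,3,5,6].
sliceExample :: IO (V.Vector Float)
sliceExample = TF.runSession $ do
    let m     = TF.constant (TF.Shape [2, 3]) [1 .. 6 :: Float]
        begin = TF.vector [0, 1 :: Int32]
        size' = TF.vector [2, 2 :: Int32]
    TF.run (Gen.slice m begin size')
```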

slice'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` index) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 index

begin: begin[i] specifies the offset into the ith dimension of + input to slice from.

-> Tensor v'3 index

size: size[i] specifies the number of elements of the ith dimension + of input to slice. If size[i] is -1, all remaining elements in dimension + i are included in the slice (i.e. this is equivalent to setting + size[i] = input.dim_size(i) - begin[i]).

-> Tensor Build t

output

softmax

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

logits: 2-D with shape `[batch_size, num_classes]`.

-> Tensor Build t

softmax: Same shape as logits.

Computes softmax activations.

For each batch i and class j we have

softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
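As a worked instance of the formula, a single row of logits [1, 2, 3] normalizes to roughly [0.09, 0.24, 0.67]. In the Haskell bindings (untested sketch):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- softmax of one batch row: ==> approx. [0.090, 0.245, 0.665].
softmaxExample :: IO (V.Vector Float)
softmaxExample = TF.runSession $ do
    let logits = TF.constant (TF.Shape [1, 3]) [1, 2, 3 :: Float]
    TF.run (Gen.softmax logits)
```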

softmax'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

logits: 2-D with shape `[batch_size, num_classes]`.

-> Tensor Build t

softmax: Same shape as logits.

softmaxCrossEntropyWithLogits

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> Tensor v'1 t

features: batch_size x num_classes matrix

-> Tensor v'2 t

labels: batch_size x num_classes matrix + The caller must ensure that each batch of labels represents a valid + probability distribution.

-> (Tensor Build t, Tensor Build t)

(loss, backprop)

  • loss: Per example loss (batch_size vector).
  • backprop: backpropagated gradients (batch_size x num_classes matrix).

Computes softmax cross entropy cost and gradients to backpropagate.

Inputs are the logits, not probabilities.

softmaxCrossEntropyWithLogits'

Arguments

:: OneOf `[Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

features: batch_size x num_classes matrix

-> Tensor v'2 t

labels: batch_size x num_classes matrix + The caller must ensure that each batch of labels represents a valid + probability distribution.

-> (Tensor Build t, Tensor Build t)

(loss, backprop)

  • loss: Per example loss (batch_size vector).
  • backprop: backpropagated gradients (batch_size x num_classes matrix).

softplus

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

features

-> Tensor Build t

activations

Computes softplus: `log(exp(features) + 1)`.

softplus'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

features

-> Tensor Build t

activations

softplusGrad

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding softplus operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding softplus operation.

-> Tensor Build t

backprops: The gradients: `gradients / (1 + exp(-features))`.

Computes softplus gradients for a softplus operation.

softplusGrad'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding softplus operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding softplus operation.

-> Tensor Build t

backprops: The gradients: `gradients / (1 + exp(-features))`.

softsign

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

features

-> Tensor Build t

activations

Computes softsign: `features / (abs(features) + 1)`.

softsign'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

features

-> Tensor Build t

activations

softsignGrad

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding softsign operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding softsign operation.

-> Tensor Build t

backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

Computes softsign gradients for a softsign operation.

softsignGrad'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

gradients: The backpropagated gradients to the corresponding softsign operation.

-> Tensor v'2 t

features: The features passed as input to the corresponding softsign operation.

-> Tensor Build t

backprops: The gradients: `gradients / (1 + abs(-features)) ** 2`.

spaceToBatch

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> Int64

block_size

-> Tensor v'1 t

input: 4-D with shape `[batch, height, width, depth]`.

-> Tensor v'2 tpaddings

paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows:

paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

The effective spatial dimensions of the zero-padded input tensor will be:

height_pad = pad_top + height + pad_bottom
width_pad = pad_left + width + pad_right

The attr block_size must be greater than one. It indicates the block size.

  • Non-overlapping blocks of size `block_size x block_size` in the height and width dimensions are rearranged into the batch dimension at each location.
  • The batch of the output tensor is `batch * block_size * block_size`.
  • Both height_pad and width_pad must be divisible by block_size.

The shape of the output will be:

[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]

Worked examples are given under spaceToBatch' below.

Among others, this operation is useful for reducing atrous convolution into regular convolution.


-> Tensor Build t

output

SpaceToBatch for 4-D tensors of type T.

This is a legacy version of the more general SpaceToBatchND.

Zero-pads and then rearranges (permutes) blocks of spatial data into batch. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the batch dimension. After the zero-padding, both height and width of the input must be divisible by the block size.
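A minimal invocation from Haskell, mirroring the first worked example under spaceToBatch' below (untested sketch):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- [1,2,2,1] input, block_size 2, no padding ==> shape [4,1,1,1], values [1,2,3,4].
spaceToBatchExample :: IO (V.Vector Float)
spaceToBatchExample = TF.runSession $ do
    let x    = TF.constant (TF.Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float]
        pads = TF.constant (TF.Shape [2, 2]) [0, 0, 0, 0 :: Int32]
    TF.run (Gen.spaceToBatch 2 x pads)
```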


spaceToBatch'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tpaddings) 
=> OpParams 
-> Int64

block_size

-> Tensor v'1 t

input: 4-D with shape `[batch, height, width, depth]`.

-> Tensor v'2 tpaddings

paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + the padding of the input with zeros across the spatial dimensions as follows:

paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

The effective spatial dimensions of the zero-padded input tensor will be:

height_pad = pad_top + height + pad_bottom
width_pad = pad_left + width + pad_right

The attr block_size must be greater than one. It indicates the block size.

  • Non-overlapping blocks of size `block_size x block_size` in the height and width dimensions are rearranged into the batch dimension at each location.
  • The batch of the output tensor is `batch * block_size * block_size`.
  • Both height_pad and width_pad must be divisible by block_size.

The shape of the output will be:

[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]

Some examples:

  1. For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

```prettyprint + x = [[[[1], [2]], [[3], [4]]]] + ```

The output tensor has shape `[4, 1, 1, 1]` and value:

```prettyprint + [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + ```

  2. For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

```prettyprint + x = [[[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]]] + ```

The output tensor has shape `[4, 1, 1, 3]` and value:

```prettyprint + [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] + ```

  3. For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

```prettyprint + x = [[[[1], [2], [3], [4]], + [[5], [6], [7], [8]], + [[9], [10], [11], [12]], + [[13], [14], [15], [16]]]] + ```

The output tensor has shape `[4, 2, 2, 1]` and value:

```prettyprint
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

  4. For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

```prettyprint + x = [[[[1], [2], [3], [4]], + [[5], [6], [7], [8]]], + [[[9], [10], [11], [12]], + [[13], [14], [15], [16]]]] + ```

The output tensor has shape `[8, 1, 2, 1]` and value:

```prettyprint + x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + ```

Among others, this operation is useful for reducing atrous convolution into + regular convolution.

-> Tensor Build t

output

spaceToBatchND

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) 
=> Tensor v'1 t

input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + where spatial_shape has M dimensions.

-> Tensor v'2 tblock_shape

block_shape: 1-D with shape `[M]`, all values must be >= 1.

-> Tensor v'3 tpaddings

paddings: 2-D with shape `[M, 2]`, all values must be >= 0. `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension `i + 1`, which corresponds to spatial dimension i. It is required that `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.

This operation is equivalent to the following steps:

  1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input according to paddings to produce padded of shape padded_shape.
  2. Reshape padded to reshaped_padded of shape: [batch] + [padded_shape[1] / block_shape[0], block_shape[0], ..., padded_shape[M] / block_shape[M-1], block_shape[M-1]] + remaining_shape
  3. Permute dimensions of reshaped_padded to produce permuted_reshaped_padded of shape: block_shape + [batch] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape
  4. Reshape permuted_reshaped_padded to flatten block_shape into the batch dimension, producing an output tensor of shape: [batch * prod(block_shape)] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape

Worked examples are given under spaceToBatchND' below.

Among others, this operation is useful for reducing atrous convolution into regular convolution.


-> Tensor Build t

output

SpaceToBatch for N-D tensors of type T.

This operation divides "spatial" dimensions `[1, ..., M]` of the input into a grid of blocks of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that in the output, the spatial dimensions `[1, ..., M]` correspond to the position within the grid, and the batch dimension combines both the position within a spatial block and the original batch position. Prior to division into blocks, the spatial dimensions of the input are optionally zero padded according to paddings. See below for a precise description.


spaceToBatchND'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tblock_shape, OneOf `[Int32, Int64]` tpaddings) 
=> OpParams 
-> Tensor v'1 t

input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + where spatial_shape has M dimensions.

-> Tensor v'2 tblock_shape

block_shape: 1-D with shape `[M]`, all values must be >= 1.

-> Tensor v'3 tpaddings

paddings: 2-D with shape `[M, 2]`, all values must be >= 0. + `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + `i + 1`, which corresponds to spatial dimension i. It is required that + `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.

This operation is equivalent to the following steps:

  1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input according to paddings to produce padded of shape padded_shape.
  2. Reshape padded to reshaped_padded of shape: [batch] + [padded_shape[1] / block_shape[0], block_shape[0], ..., padded_shape[M] / block_shape[M-1], block_shape[M-1]] + remaining_shape
  3. Permute dimensions of reshaped_padded to produce permuted_reshaped_padded of shape: block_shape + [batch] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape
  4. Reshape permuted_reshaped_padded to flatten block_shape into the batch dimension, producing an output tensor of shape: [batch * prod(block_shape)] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape

Some examples:

  1. For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and + `paddings = [[0, 0], [0, 0]]`:

```prettyprint + x = [[[[1], [2]], [[3], [4]]]] + ```

The output tensor has shape `[4, 1, 1, 1]` and value:

```prettyprint + [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + ```

  2. For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`:

```prettyprint + x = [[[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]]] + ```

The output tensor has shape `[4, 1, 1, 3]` and value:

```prettyprint + [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] + ```

  3. For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [0, 0]]`:

```prettyprint + x = [[[[1], [2], [3], [4]], + [[5], [6], [7], [8]], + [[9], [10], [11], [12]], + [[13], [14], [15], [16]]]] + ```

The output tensor has shape `[4, 2, 2, 1]` and value:

```prettyprint
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

  4. For the following input of shape `[2, 2, 4, 1]`, `block_shape = [2, 2]`, and `paddings = [[0, 0], [2, 0]]`:

```prettyprint + x = [[[[1], [2], [3], [4]], + [[5], [6], [7], [8]]], + [[[9], [10], [11], [12]], + [[13], [14], [15], [16]]]] + ```

The output tensor has shape `[8, 1, 3, 1]` and value:

```prettyprint + x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + [[[0], [2], [4]]], [[[0], [10], [12]]], + [[[0], [5], [7]]], [[[0], [13], [15]]], + [[[0], [6], [8]]], [[[0], [14], [16]]]] + ```

Among others, this operation is useful for reducing atrous convolution into + regular convolution.

-> Tensor Build t

output

spaceToDepth

Arguments

:: TensorType t 
=> Int64

block_size: The size of the spatial block.

-> Tensor v'1 t

input

-> Tensor Build t

output

SpaceToDepth for tensors of type T.

Rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. The attr block_size indicates the input block size and how the data is moved.

  • Non-overlapping blocks of size `block_size x block_size` are rearranged into depth at each location.
  • The depth of the output tensor is `input_depth * block_size * block_size`.
  • The input tensor's height and width must be divisible by block_size.

That is, assuming the input is in the shape `[batch, height, width, depth]`, the shape of the output will be: `[batch, height/block_size, width/block_size, depth*block_size*block_size]`

This operation requires that the input tensor be of rank 4, and that block_size be >= 1 and a divisor of both the input height and width.

This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.

For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:

```prettyprint + x = [[[[1], [2]], + [[3], [4]]]] + ```

This operation will output a tensor of shape `[1, 1, 1, 4]`:

```prettyprint + [[[[1, 2, 3, 4]]]] + ```

Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + the corresponding output will have a single element (i.e. width and height are + both 1) and will have a depth of 4 channels (1 * block_size * block_size). + The output element shape is `[1, 1, 4]`.

For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.

```prettyprint + x = [[[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]]] + ```

This operation, for block_size of 2, will return the following tensor of shape + `[1, 1, 1, 12]`

```prettyprint + [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + ```

Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:

```prettyprint + x = [[[[1], [2], [5], [6]], + [[3], [4], [7], [8]], + [[9], [10], [13], [14]], + [[11], [12], [15], [16]]]] + ```

the operator will return the following tensor of shape `[1 2 2 4]`:

```prettyprint + x = [[[[1, 2, 3, 4], + [5, 6, 7, 8]], + [[9, 10, 11, 12], + [13, 14, 15, 16]]]] + ```
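The first example above, driven from Haskell (untested sketch):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- [1,2,2,1] input, block_size 2 ==> shape [1,1,1,4], values [1,2,3,4].
spaceToDepthExample :: IO (V.Vector Float)
spaceToDepthExample = TF.runSession $ do
    let x = TF.constant (TF.Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float]
    TF.run (Gen.spaceToDepth 2 x)
```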

spaceToDepth'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

block_size: The size of the spatial block.

-> Tensor v'1 t

input

-> Tensor Build t

output

sparseAccumulatorApplyGradient

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Bool

has_known_shape: Boolean indicating whether gradient_shape is unknown, in which + case the input is ignored during validation.

-> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int64

local_step: The local_step value at which the sparse gradient was computed.

-> Tensor v'3 Int64

gradient_indices: Indices of the sparse gradient to be accumulated. Must be a + vector.

-> Tensor v'4 dtype

gradient_values: Values are the non-zero slices of the gradient, and must have + the same first dimension as indices, i.e., the nnz represented by indices and + values must be consistent.

-> Tensor v'5 Int64

gradient_shape: Shape of the sparse gradient to be accumulated.

-> m' ControlNode 

Applies a sparse gradient to a given accumulator. Does not add if local_step is

less than the accumulator's global_step.

sparseAccumulatorApplyGradient'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> OpParams 
-> Bool

has_known_shape: Boolean indicating whether gradient_shape is unknown, in which + case the input is ignored during validation.

-> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v'2 Int64

local_step: The local_step value at which the sparse gradient was computed.

-> Tensor v'3 Int64

gradient_indices: Indices of the sparse gradient to be accumulated. Must be a + vector.

-> Tensor v'4 dtype

gradient_values: Values are the non-zero slices of the gradient, and must have + the same first dimension as indices, i.e., the nnz represented by indices and + values must be consistent.

-> Tensor v'5 Int64

gradient_shape: Shape of the sparse gradient to be accumulated.

-> m' ControlNode 

sparseAccumulatorTakeGradient

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Tensor Ref ByteString

handle: The handle to a SparseConditionalAccumulator.

-> Tensor v'2 Int32

num_required: Number of gradients required before we return an aggregate.

-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

(indices, values, shape)

  • indices: Indices of the average of the accumulated sparse gradients.
  • values: Values of the average of the accumulated sparse gradients.
  • shape: Shape of the average of the accumulated sparse gradients.

Extracts the average sparse gradient in the given SparseConditionalAccumulator,

provided that sufficient (i.e., more than num_required) gradients have been accumulated. The op blocks until sufficient gradients have been accumulated. If the accumulator has already aggregated more than num_required gradients, it returns its average of the accumulated gradients. It also automatically increments the recorded global_step in the accumulator by 1, and resets the aggregate to 0.

sparseAccumulatorTakeGradient'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a SparseConditionalAccumulator.

-> Tensor v'2 Int32

num_required: Number of gradients required before we return an aggregate.

-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

(indices, values, shape)

  • indices: Indices of the average of the accumulated sparse gradients.
  • values: Values of the average of the accumulated sparse gradients.
  • shape: Shape of the average of the accumulated sparse gradients.

sparseAdd

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) 
=> Tensor v'1 Int64

a_indices: 2-D. The indices of the first SparseTensor, size `[nnz, ndims]` Matrix.

-> Tensor v'2 t

a_values: 1-D. The values of the first SparseTensor, size `[nnz]` Vector.

-> Tensor v'3 Int64

a_shape: 1-D. The shape of the first SparseTensor, size `[ndims]` Vector.

-> Tensor v'4 Int64

b_indices: 2-D. The indices of the second SparseTensor, size `[nnz, ndims]` Matrix.

-> Tensor v'5 t

b_values: 1-D. The values of the second SparseTensor, size `[nnz]` Vector.

-> Tensor v'6 Int64

b_shape: 1-D. The shape of the second SparseTensor, size `[ndims]` Vector.

-> Tensor v'7 treal

thresh: 0-D. The magnitude threshold that determines if an output value/index + pair takes space.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(sum_indices, sum_values, sum_shape)

  • sum_indices
  • sum_values
  • sum_shape

Adds two SparseTensor objects to produce another SparseTensor.

The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, before this step run SparseReorder to restore index ordering.

By default, if two values sum to zero at some index, the output SparseTensor would still include that particular location in its index, storing a zero in the corresponding value slot. To override this, callers can specify thresh, indicating that if the sum has a magnitude strictly smaller than thresh, its corresponding value and index would then not be included. In particular, `thresh == 0` (default) means everything is kept and actual thresholding happens only for a positive value.

In the following shapes, nnz is the count after taking thresh into account.
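A hedged sketch adding two 1-D sparse tensors (untested; fetching the triple assumes a 3-tuple Fetchable instance is available):

```haskell
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as Gen
import qualified TensorFlow.Ops as TF

-- Sparse [5,0,0] + sparse [0,0,7], dense shape [3]:
-- ==> sum has values [5.0, 7.0] at positions 0 and 2, shape [3].
sparseAddExample :: IO (V.Vector Int64, V.Vector Float, V.Vector Int64)
sparseAddExample = TF.runSession $ do
    let aIx    = TF.constant (TF.Shape [1, 1]) [0 :: Int64]
        aVal   = TF.vector [5 :: Float]
        aSh    = TF.vector [3 :: Int64]
        bIx    = TF.constant (TF.Shape [1, 1]) [2 :: Int64]
        bVal   = TF.vector [7 :: Float]
        bSh    = TF.vector [3 :: Int64]
        thresh = TF.scalar (0 :: Float)  -- keep all summed entries
    TF.run (Gen.sparseAdd aIx aVal aSh bIx bVal bSh thresh)
```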

sparseAdd'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) 
=> OpParams 
-> Tensor v'1 Int64

a_indices: 2-D. The indices of the first SparseTensor, size `[nnz, ndims]` Matrix.

-> Tensor v'2 t

a_values: 1-D. The values of the first SparseTensor, size `[nnz]` Vector.

-> Tensor v'3 Int64

a_shape: 1-D. The shape of the first SparseTensor, size `[ndims]` Vector.

-> Tensor v'4 Int64

b_indices: 2-D. The indices of the second SparseTensor, size `[nnz, ndims]` Matrix.

-> Tensor v'5 t

b_values: 1-D. The values of the second SparseTensor, size `[nnz]` Vector.

-> Tensor v'6 Int64

b_shape: 1-D. The shape of the second SparseTensor, size `[ndims]` Vector.

-> Tensor v'7 treal

thresh: 0-D. The magnitude threshold that determines if an output value/index + pair takes space.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(sum_indices, sum_values, sum_shape)

  • sum_indices
  • sum_values
  • sum_shape

sparseAddGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to + the non-empty values of the sum.

-> Tensor v'2 Int64

a_indices: 2-D. The indices of the SparseTensor A, size `[nnz(A), ndims]`.

-> Tensor v'3 Int64

b_indices: 2-D. The indices of the SparseTensor B, size `[nnz(B), ndims]`.

-> Tensor v'4 Int64

sum_indices: 2-D. The indices of the sum SparseTensor, size + `[nnz(sum), ndims]`.

-> (Tensor Build t, Tensor Build t)

(a_val_grad, b_val_grad)

  • a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the + non-empty values of A.
  • b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the + non-empty values of B.

The gradient operator for the SparseAdd op.

The SparseAdd op calculates A + B, where A, B, and the sum are all represented + as SparseTensor objects. This op takes in the upstream gradient w.r.t. + non-empty values of the sum, and outputs the gradients w.r.t. the non-empty + values of A and B.

sparseAddGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to + the non-empty values of the sum.

-> Tensor v'2 Int64

a_indices: 2-D. The indices of the SparseTensor A, size `[nnz(A), ndims]`.

-> Tensor v'3 Int64

b_indices: 2-D. The indices of the SparseTensor B, size `[nnz(B), ndims]`.

-> Tensor v'4 Int64

sum_indices: 2-D. The indices of the sum SparseTensor, size + `[nnz(sum), ndims]`.

-> (Tensor Build t, Tensor Build t)

(a_val_grad, b_val_grad)

  • a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the + non-empty values of A.
  • b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the + non-empty values of B.

sparseApplyAdadelta

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Learning rate. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> Tensor v'8 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

var: Should be from a Variable().

sparseApplyAdadelta'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

accum_update: Should be from a Variable().

-> Tensor v'4 t

lr: Learning rate. Must be a scalar.

-> Tensor v'5 t

rho: Decay factor. Must be a scalar.

-> Tensor v'6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v'7 t

grad: The gradient.

-> Tensor v'8 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

That is, for rows we have grad for, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))

sparseApplyAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyAdagradDA

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

gradient_accumulator: Should be from a Variable().

-> Tensor Ref t

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Learning rate. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 Int64

global_step: Training step number. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

sparseApplyAdagradDA'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

gradient_accumulator: Should be from a Variable().

-> Tensor Ref t

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Learning rate. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 Int64

global_step: Training step number. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyCenteredRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

mg: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> Tensor v'10 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the centered RMSProp algorithm.

The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.

Note that in the dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2
mean_grad = decay * mean_grad + (1-decay) * gradient
Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
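
Combining the two displays above into one consistent notation (a sketch; \(\rho\) is the decay rate, \(\mu\) the momentum, \(\eta\) the learning rate, \(\epsilon\) the ridge term):

\[
\begin{aligned}
m_g &\leftarrow \rho\, m_g + (1-\rho)\, g \\
m_s &\leftarrow \rho\, m_s + (1-\rho)\, g^2 \\
\mathit{mom} &\leftarrow \mu\, \mathit{mom} + \frac{\eta\, g}{\sqrt{m_s + \epsilon - m_g^2}} \\
v &\leftarrow v - \mathit{mom}
\end{aligned}
\]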

sparseApplyCenteredRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

mg: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'6 t

rho: Decay rate. Must be a scalar.

-> Tensor v'7 t

momentum

-> Tensor v'8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'9 t

grad: The gradient.

-> Tensor v'10 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyFtrl

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 t

lr_power: Scaling factor. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

Update relevant entries in '*var' according to the Ftrl-proximal scheme.

That is, for rows we have grad for, we update var, accum and linear as follows:
accum_new = accum + grad * grad
linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
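
The same update transcribed as equations (a sketch; \(p\) = lr_power, \(\eta\) = lr, \(a\) = accum, \(\ell\) = linear, \(v\) = var):

\[
\begin{aligned}
a' &= a + g^2 \\
\ell &\leftarrow \ell + g + \frac{(a')^{-p} - a^{-p}}{\eta}\, v \\
q &= \frac{1}{(a')^{p}\,\eta} + 2\, l_2 \\
v &= \begin{cases} \dfrac{\operatorname{sign}(\ell)\, l_1 - \ell}{q}, & |\ell| > l_1 \\ 0, & \text{otherwise} \end{cases} \\
a &= a'
\end{aligned}
\]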

sparseApplyFtrl'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

linear: Should be from a Variable().

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'9 t

lr_power: Scaling factor. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyMomentum

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

momentum: Momentum. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

Update relevant entries in '*var' and '*accum' according to the momentum scheme.

Set use_nesterov = True if you want to use Nesterov momentum.

That is, for rows we have grad for, we update var and accum as follows:

accum = accum * momentum + grad
var -= lr * accum
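
Equivalently, per selected row (with \(\mu\) the momentum and \(\eta\) the learning rate):

\[
a \leftarrow \mu\, a + g, \qquad v \leftarrow v - \eta\, a
\]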

sparseApplyMomentum'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

grad: The gradient.

-> Tensor v'5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v'6 t

momentum: Momentum. Must be a scalar.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyProximalAdagrad

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> Tensor v'7 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

That is, for rows we have grad for, we update var and accum as follows:
accum += grad * grad
prox_v = var
prox_v -= lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
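
In equation form (a sketch; \(\eta\) = lr):

\[
a \leftarrow a + g^2, \qquad
\tilde v = v - \frac{\eta\, g}{\sqrt{a}}, \qquad
v \leftarrow \frac{\operatorname{sign}(\tilde v)}{1 + \eta\, l_2}\, \max\{|\tilde v| - \eta\, l_1,\, 0\}
\]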

sparseApplyProximalAdagrad'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v'3 t

lr: Learning rate. Must be a scalar.

-> Tensor v'4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'6 t

grad: The gradient.

-> Tensor v'7 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyProximalGradientDescent

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

grad: The gradient.

-> Tensor v'6 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

Sparse update '*var' as FOBOS algorithm with fixed learning rate.

That is, for rows we have grad for, we update var as follows:
prox_v = var - alpha * grad
var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
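
In equation form (a sketch):

\[
\tilde v = v - \alpha\, g, \qquad
v \leftarrow \frac{\operatorname{sign}(\tilde v)}{1 + \alpha\, l_2}\, \max\{|\tilde v| - \alpha\, l_1,\, 0\}
\]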

sparseApplyProximalGradientDescent'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor v'2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v'3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v'4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v'5 t

grad: The gradient.

-> Tensor v'6 tindices

indices: A vector of indices into the first dimension of var and accum.

-> m' (Tensor Ref t)

out: Same as "var".

sparseApplyRMSProp

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> Tensor v'9 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' (Tensor Ref t)

out: Same as "var".

Update '*var' according to the RMSProp algorithm.

Note that in the dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom

sparseApplyRMSProp'

Arguments

:: (MonadBuild m', OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v'4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v'5 t

rho: Decay rate. Must be a scalar.

-> Tensor v'6 t

momentum

-> Tensor v'7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v'8 t

grad: The gradient.

-> Tensor v'9 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> m' (Tensor Ref t)

out: Same as "var".

sparseConcat

Arguments

:: TensorType t 
=> Int64

concat_dim: Dimension to concatenate along. Must be in range [-rank, rank), where rank is the number of dimensions in each input SparseTensor.

-> [Tensor v'1 Int64]

indices: 2-D. Indices of each input SparseTensor.

-> [Tensor v'2 t]

values: 1-D. Non-empty values of each SparseTensor.

-> [Tensor v'3 Int64]

shapes: 1-D. Shapes of each SparseTensor.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(output_indices, output_values, output_shape)

  • output_indices: 2-D. Indices of the concatenated SparseTensor.
  • output_values: 1-D. Non-empty values of the concatenated SparseTensor.
  • output_shape: 1-D. Shape of the concatenated SparseTensor.

Concatenates a list of SparseTensor along the specified dimension.

Concatenation is with respect to the dense versions of these sparse tensors. It is assumed that each input is a SparseTensor whose elements are ordered along increasing dimension number.

All inputs' shapes must match, except for the concat dimension. The indices, values, and shapes lists must have the same length.

The output shape is identical to the inputs', except along the concat dimension, where it is the sum of the inputs' sizes along that dimension.

The output elements will be resorted to preserve the sort order along increasing dimension number.

This op runs in `O(M log M)` time, where M is the total number of non-empty values across all inputs. This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension.

For example, if `concat_dim = 1` and the inputs are

sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"

sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"

then the output will be

shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"

Graphically this is equivalent to doing

[    a] concat [  d e  ] = [    a   d e  ]
[b c  ]        [       ]   [b c          ]
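
As a concrete, untested sketch of calling this op from the Haskell bindings, the following mirrors the example above with Float values standing in for the strings. CoreOps is TensorFlow.GenOps.Core as documented here; constant/vector and the session helpers are assumed to come from TensorFlow.Ops and TensorFlow.Core of the same release.

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = TF.runSession $ do
    -- sp_inputs[0]: shape [2,3], entries [0,2]=1, [1,0]=2, [1,1]=3
    let ixA  = Ops.constant (TF.Shape [3, 2]) [0, 2, 1, 0, 1, 1 :: Int64]
        valA = Ops.vector [1, 2, 3 :: Float]
        shA  = Ops.vector [2, 3 :: Int64]
    -- sp_inputs[1]: shape [2,4], entries [0,1]=4, [0,2]=5
    let ixB  = Ops.constant (TF.Shape [2, 2]) [0, 1, 0, 2 :: Int64]
        valB = Ops.vector [4, 5 :: Float]
        shB  = Ops.vector [2, 4 :: Int64]
        (_ix, vals, sh) = CoreOps.sparseConcat 1 [ixA, ixB] [valA, valB] [shA, shB]
    (vals', sh') <- TF.run (vals, sh)
    liftIO $ print (vals' :: V.Vector Float)  -- expected [1,4,5,2,3] (row-major order)
    liftIO $ print (sh'   :: V.Vector Int64)  -- expected [2,7]
```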

sparseConcat'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

concat_dim: Dimension to concatenate along. Must be in range [-rank, rank), where rank is the number of dimensions in each input SparseTensor.

-> [Tensor v'1 Int64]

indices: 2-D. Indices of each input SparseTensor.

-> [Tensor v'2 t]

values: 1-D. Non-empty values of each SparseTensor.

-> [Tensor v'3 Int64]

shapes: 1-D. Shapes of each SparseTensor.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(output_indices, output_values, output_shape)

  • output_indices: 2-D. Indices of the concatenated SparseTensor.
  • output_values: 1-D. Non-empty values of the concatenated SparseTensor.
  • output_shape: 1-D. Shape of the concatenated SparseTensor.

sparseConditionalAccumulator

Arguments

:: MonadBuild m' 
=> DataType

dtype: The type of the value being accumulated.

-> Shape

shape: The shape of the values.

-> m' (Tensor Ref ByteString)

handle: The handle to the accumulator.

A conditional accumulator for aggregating sparse gradients. The accumulator

accepts gradients marked with local_step greater than or equal to the most recent global_step known to the accumulator. The average can be extracted from the accumulator, provided sufficient gradients have been accumulated. Extracting the average automatically resets the aggregate to 0, and increments the global_step recorded by the accumulator.

sparseConditionalAccumulator'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

dtype: The type of the value being accumulated.

-> Shape

shape: The shape of the values.

-> m' (Tensor Ref ByteString)

handle: The handle to the accumulator.

sparseDenseCwiseAdd

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 t

dense: R-D. The dense Tensor operand.

-> Tensor Build t

output: 1-D. The N values that are operated on.

Adds up a SparseTensor and a dense Tensor, using these special rules:

  1. Broadcasts the dense side to have the same shape as the sparse side, if eligible;
  2. Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.

By these rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values.

sparseDenseCwiseAdd'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 t

dense: R-D. The dense Tensor operand.

-> Tensor Build t

output: 1-D. The N values that are operated on.

sparseDenseCwiseDiv

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 t

dense: R-D. The dense Tensor operand.

-> Tensor Build t

output: 1-D. The N values that are operated on.

Component-wise divides a SparseTensor by a dense Tensor.

*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.

sparseDenseCwiseDiv'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 t

dense: R-D. The dense Tensor operand.

-> Tensor Build t

output: 1-D. The N values that are operated on.

sparseDenseCwiseMul

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 t

dense: R-D. The dense Tensor operand.

-> Tensor Build t

output: 1-D. The N values that are operated on.

Component-wise multiplies a SparseTensor by a dense Tensor.

The output locations corresponding to the implicitly zero elements in the sparse tensor will be zero (i.e., will not take up storage space), regardless of the contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).

*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.

sparseDenseCwiseMul'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 t

dense: R-D. The dense Tensor operand.

-> Tensor Build t

output: 1-D. The N values that are operated on.

sparseMatMul

Arguments

:: (OneOf `[Word16, Float]` ta, OneOf `[Word16, Float]` tb) 
=> Tensor v'1 ta

a

-> Tensor v'2 tb

b

-> Tensor Build Float

product

Multiply matrix "a" by matrix "b".

The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". This op is optimized for the case where at least one of "a" or "b" is sparse. The breakeven for using this versus a dense matrix multiply on one platform was 30% zero values in the sparse matrix.

sparseMatMul'

Arguments

:: (OneOf `[Word16, Float]` ta, OneOf `[Word16, Float]` tb) 
=> OpParams 
-> Tensor v'1 ta

a

-> Tensor v'2 tb

b

-> Tensor Build Float

product

sparseReduceSum

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v'3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int32

reduction_axes: 1-D. Length-K vector containing the reduction axes.

-> Tensor Build t

output: `R-K`-D. The reduced Tensor.

Computes the sum of elements across dimensions of a SparseTensor.

This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense Tensor instead of a sparse one.

Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.

If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
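
A minimal, untested sketch of this op from the Haskell side (CoreOps is TensorFlow.GenOps.Core; constant/vector are assumed from TensorFlow.Ops), summing a `[2, 3]` SparseTensor over axis 1:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32, Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = TF.runSession $ do
    -- entries [0,0]=1, [0,2]=2, [1,1]=3 in a [2,3] SparseTensor
    let ix   = Ops.constant (TF.Shape [3, 2]) [0, 0, 0, 2, 1, 1 :: Int64]
        vals = Ops.vector [1, 2, 3 :: Float]
        sh   = Ops.vector [2, 3 :: Int64]
        axes = Ops.vector [1 :: Int32]
    rowSums <- TF.run (CoreOps.sparseReduceSum ix vals sh axes)
    liftIO $ print (rowSums :: V.Vector Float)  -- expected [3.0, 3.0]
```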

sparseReduceSum'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v'3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int32

reduction_axes: 1-D. Length-K vector containing the reduction axes.

-> Tensor Build t

output: `R-K`-D. The reduced Tensor.

sparseReduceSumSparse

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v'3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int32

reduction_axes: 1-D. Length-K vector containing the reduction axes.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(output_indices, output_values, output_shape)

  • output_indices
  • output_values
  • output_shape

Computes the sum of elements across dimensions of a SparseTensor.

This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor.

Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.

If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.

sparseReduceSumSparse'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v'3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int32

reduction_axes: 1-D. Length-K vector containing the reduction axes.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(output_indices, output_values, output_shape)

  • output_indices
  • output_values
  • output_shape

sparseReorder

Arguments

:: TensorType t 
=> Tensor v'1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v'3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> (Tensor Build Int64, Tensor Build t)

(output_indices, output_values)

  • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but in canonical row-major ordering.
  • output_values: 1-D. N non-empty values corresponding to output_indices.

Reorders a SparseTensor into the canonical, row-major ordering.

Note that by convention, all sparse ops preserve the canonical ordering along increasing dimension number. The only time ordering can be violated is during manual manipulation of the indices and values vectors to add entries.

Reordering does not affect the shape of the SparseTensor.

If the tensor has rank R and N non-empty values, input_indices has shape `[N, R]`, input_values has length N, and input_shape has length R.

sparseReorder'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v'2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v'3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> (Tensor Build Int64, Tensor Build t)

(output_indices, output_values)

  • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but in canonical row-major ordering.
  • output_values: 1-D. N non-empty values corresponding to output_indices.

sparseReshape

Arguments

:: Tensor v'1 Int64

input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a SparseTensor.

-> Tensor v'2 Int64

input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

-> Tensor v'3 Int64

new_shape: 1-D. R_out vector with the requested new dense shape.

-> (Tensor Build Int64, Tensor Build Int64)

(output_indices, output_shape)

  • output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty values in the output SparseTensor.
  • output_shape: 1-D. R_out vector with the full dense shape of the output SparseTensor. This is the same as new_shape but with any -1 dimensions filled in.

Reshapes a SparseTensor to represent values in a new dense shape.

This operation has the same semantics as reshape on the represented dense tensor. The input_indices are recomputed based on the requested new_shape.

If one component of new_shape is the special value -1, the size of that dimension is computed so that the total dense size remains constant. At most one component of new_shape can be -1. The number of dense elements implied by new_shape must be the same as the number of dense elements originally implied by input_shape.

Reshaping does not affect the order of values in the SparseTensor.

If the input tensor has rank R_in and N non-empty values, and new_shape has length R_out, then input_indices has shape `[N, R_in]`, input_shape has length R_in, output_indices has shape `[N, R_out]`, and output_shape has length R_out.
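
An untested sketch (qualifiers as in the earlier examples) reshaping a `[2, 3]` SparseTensor to `[3, -1]`; the -1 dimension is filled in as 2:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = TF.runSession $ do
    -- two non-empty values, at [0,1] and [1,2] of a [2,3] SparseTensor
    let ix    = Ops.constant (TF.Shape [2, 2]) [0, 1, 1, 2 :: Int64]
        sh    = Ops.vector [2, 3 :: Int64]
        newSh = Ops.vector [3, -1 :: Int64]
        (ix', sh') = CoreOps.sparseReshape ix sh newSh
    (ixV, shV) <- TF.run (ix', sh')
    liftIO $ print (ixV :: V.Vector Int64)  -- expected [0,1, 2,1] (flat positions 1 and 5)
    liftIO $ print (shV :: V.Vector Int64)  -- expected [3,2]
```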

sparseReshape'

Arguments

:: OpParams 
-> Tensor v'1 Int64

input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a SparseTensor.

-> Tensor v'2 Int64

input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

-> Tensor v'3 Int64

new_shape: 1-D. R_out vector with the requested new dense shape.

-> (Tensor Build Int64, Tensor Build Int64)

(output_indices, output_shape)

  • output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty values in the output SparseTensor.
  • output_shape: 1-D. R_out vector with the full dense shape of the output SparseTensor. This is the same as new_shape but with any -1 dimensions filled in.

sparseSegmentMean

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

data

-> Tensor v'2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v'3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the mean along sparse segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Like SegmentMean, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

sparseSegmentMean'

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v'3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

sparseSegmentMeanGrad

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

grad: gradient propagated to the SparseSegmentMean op.

-> Tensor v'2 tidx

indices: indices passed to the corresponding SparseSegmentMean op.

-> Tensor v'3 Int32

segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

-> Tensor v'4 Int32

output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

-> Tensor Build t

output

Computes gradients for SparseSegmentMean.

Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is output_dim0.

sparseSegmentMeanGrad'

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

grad: gradient propagated to the SparseSegmentMean op.

-> Tensor v'2 tidx

indices: indices passed to the corresponding SparseSegmentMean op.

-> Tensor v'3 Int32

segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

-> Tensor v'4 Int32

output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

-> Tensor Build t

output

sparseSegmentSqrtN

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

data

-> Tensor v'2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v'3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the sum along sparse segments of a tensor divided by the sqrt of N.

N is the size of the segment being reduced.

Read the section on Segmentation for an explanation of segments.

sparseSegmentSqrtN'

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v'3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

sparseSegmentSqrtNGrad

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

grad: gradient propagated to the SparseSegmentSqrtN op.

-> Tensor v'2 tidx

indices: indices passed to the corresponding SparseSegmentSqrtN op.

-> Tensor v'3 Int32

segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

-> Tensor v'4 Int32

output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

-> Tensor Build t

output

Computes gradients for SparseSegmentSqrtN.

Returns tensor "output" with same shape as grad, except for dimension 0 whose + value is output_dim0.

sparseSegmentSqrtNGrad'

Arguments

:: (OneOf `[Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

grad: gradient propagated to the SparseSegmentSqrtN op.

-> Tensor v'2 tidx

indices: indices passed to the corresponding SparseSegmentSqrtN op.

-> Tensor v'3 Int32

segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.

-> Tensor v'4 Int32

output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.

-> Tensor Build t

output

sparseSegmentSum

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

data

-> Tensor v'2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v'3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the sum along sparse segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Like SegmentSum, but segment_ids can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by indices.

For example:

```prettyprint
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

# Select two rows, one segment.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  ==> [[0 0 0 0]]

# Select two rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  ==> [[ 1  2  3  4]
       [-1 -2 -3 -4]]

# Select all rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  ==> [[0 0 0 0]
       [5 6 7 8]]

# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
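
The first case above, transcribed as an untested Haskell sketch (qualifiers as in the earlier examples):

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = TF.runSession $ do
    let c = Ops.constant (TF.Shape [3, 4])
              [1, 2, 3, 4, -1, -2, -3, -4, 5, 6, 7, 8 :: Float]
        indices    = Ops.vector [0, 1 :: Int32]  -- select rows 0 and 1
        segmentIds = Ops.vector [0, 0 :: Int32]  -- both go to segment 0
    summed <- TF.run (CoreOps.sparseSegmentSum c indices segmentIds)
    liftIO $ print (summed :: V.Vector Float)  -- expected [0,0,0,0]
```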

sparseSegmentSum'

Arguments

:: (OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v'3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Build t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

sparseSoftmax

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 Int64

sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor Build t

output: 1-D. The NNZ values for the result SparseTensor.

Applies softmax to a batched N-D SparseTensor.

The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` (where `N >= 2`), and with indices sorted in the canonical lexicographic order.

This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost logical submatrix with shape `[B, C]`, but with the catch that *the implicitly zero elements do not participate*. Specifically, the algorithm is equivalent to the following:

  1. Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension;
  2. Masks out the original implicitly-zero locations;
  3. Renormalizes the remaining elements.

Hence, the SparseTensor result has exactly the same non-zero indices and shape.

sparseSoftmax'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a SparseTensor, in canonical ordering.

-> Tensor v'2 t

sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

-> Tensor v'3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor Build t

output: 1-D. The NNZ values for the result SparseTensor.

sparseSoftmaxCrossEntropyWithLogits

Arguments

:: (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) 
=> Tensor v'1 t

features: batch_size x num_classes matrix

-> Tensor v'2 tlabels

labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry.

-> (Tensor Build t, Tensor Build t)

(loss, backprop)

  • loss: Per example loss (batch_size vector).
  • backprop: backpropagated gradients (batch_size x num_classes matrix).

Computes softmax cross entropy cost and gradients to backpropagate.

Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept a matrix of label probabilities, but rather a single label per row of features. This label is considered to have probability 1.0 for the given row.

Inputs are the logits, not probabilities.

sparseSoftmaxCrossEntropyWithLogits'

Arguments

:: (OneOf `[Word16, Double, Float]` t, OneOf `[Int32, Int64]` tlabels) 
=> OpParams 
-> Tensor v'1 t

features: batch_size x num_classes matrix

-> Tensor v'2 tlabels

labels: batch_size vector with values in [0, num_classes). This is the label for the given minibatch entry.

-> (Tensor Build t, Tensor Build t)

(loss, backprop)

  • loss: Per example loss (batch_size vector).
  • backprop: backpropagated gradients (batch_size x num_classes matrix).

sparseSparseMaximum

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int64

a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

-> Tensor v'2 t

a_values: 1-D. N non-empty values corresponding to a_indices.

-> Tensor v'3 Int64

a_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int64

b_indices: counterpart to a_indices for the other operand.

-> Tensor v'5 t

b_values: counterpart to a_values for the other operand; must be of the same dtype.

-> Tensor v'6 Int64

b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

-> (Tensor Build Int64, Tensor Build t)

(output_indices, output_values)

  • output_indices: 2-D. The indices of the output SparseTensor.
  • output_values: 1-D. The values of the output SparseTensor.

Returns the element-wise max of two SparseTensors.

Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

sparseSparseMaximum'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

-> Tensor v'2 t

a_values: 1-D. N non-empty values corresponding to a_indices.

-> Tensor v'3 Int64

a_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int64

b_indices: counterpart to a_indices for the other operand.

-> Tensor v'5 t

b_values: counterpart to a_values for the other operand; must be of the same dtype.

-> Tensor v'6 Int64

b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

-> (Tensor Build Int64, Tensor Build t)

(output_indices, output_values)

  • output_indices: 2-D. The indices of the output SparseTensor.
  • output_values: 1-D. The values of the output SparseTensor.

sparseSparseMinimum

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 Int64

a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

-> Tensor v'2 t

a_values: 1-D. N non-empty values corresponding to a_indices.

-> Tensor v'3 Int64

a_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int64

b_indices: counterpart to a_indices for the other operand.

-> Tensor v'5 t

b_values: counterpart to a_values for the other operand; must be of the same dtype.

-> Tensor v'6 Int64

b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

-> (Tensor Build Int64, Tensor Build t)

(output_indices, output_values)

  • output_indices: 2-D. The indices of the output SparseTensor.
  • output_values: 1-D. The values of the output SparseTensor.

Returns the element-wise min of two SparseTensors.

Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

sparseSparseMinimum'

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 Int64

a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

-> Tensor v'2 t

a_values: 1-D. N non-empty values corresponding to a_indices.

-> Tensor v'3 Int64

a_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v'4 Int64

b_indices: counterpart to a_indices for the other operand.

-> Tensor v'5 t

b_values: counterpart to a_values for the other operand; must be of the same dtype.

-> Tensor v'6 Int64

b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

-> (Tensor Build Int64, Tensor Build t)

(output_indices, output_values)

  • output_indices: 2-D. The indices of the output SparseTensor.
  • output_values: 1-D. The values of the output SparseTensor.

sparseSplit

Arguments

:: TensorType t 
=> Int64

num_split: The number of ways to split.

-> Tensor v'1 Int64

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(shape))`.

-> Tensor v'2 Int64

indices: 2-D tensor represents the indices of the sparse tensor.

-> Tensor v'3 t

values: 1-D tensor represents the values of the sparse tensor.

-> Tensor v'4 Int64

shape: 1-D. Tensor representing the shape of the sparse tensor.
output_indices: A list of 1-D tensors representing the indices of the output sparse tensors.

-> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])

(output_indices, output_values, output_shape)

  • output_indices
  • output_values: A list of 1-D tensors representing the values of the output sparse tensors.
  • output_shape: A list of 1-D tensors representing the shape of the output sparse tensors.

Split a SparseTensor into num_split tensors along one dimension.

If `shape[split_dim]` is not an integer multiple of num_split, slices `[0 : shape[split_dim] % num_split]` get one extra dimension. For example, if `split_dim = 1` and `num_split = 2` and the input is

input_tensor = shape = [2, 7]
[    a   d e  ]
[b c          ]

Graphically the output tensors are:

output_tensor[0] = shape = [2, 4]
[    a  ]
[b c    ]

output_tensor[1] = shape = [2, 3]
[ d e  ]
[      ]

sparseSplit'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

num_split: The number of ways to split.

-> Tensor v'1 Int64

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(shape))`.

-> Tensor v'2 Int64

indices: 2-D tensor represents the indices of the sparse tensor.

-> Tensor v'3 t

values: 1-D tensor represents the values of the sparse tensor.

-> Tensor v'4 Int64

shape: 1-D. Tensor representing the shape of the sparse tensor.
output_indices: A list of 1-D tensors representing the indices of the output sparse tensors.

-> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64])

(output_indices, output_values, output_shape)

  • output_indices
  • output_values: A list of 1-D tensors representing the values of the output sparse tensors.
  • output_shape: A list of 1-D tensors representing the shape of the output sparse tensors.

sparseTensorDenseAdd

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 tindices

a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

-> Tensor v'2 t

a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

-> Tensor v'3 tindices

a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

-> Tensor v'4 t

b: ndims-D Tensor. With shape a_shape.

-> Tensor Build t

output

Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.

This Op does not require a_indices be sorted in standard lexicographic order.

sparseTensorDenseAdd'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 tindices

a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

-> Tensor v'2 t

a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

-> Tensor v'3 tindices

a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

-> Tensor v'4 t

b: ndims-D Tensor. With shape a_shape.

-> Tensor Build t

output

sparseTensorDenseMatMul

Arguments

:: TensorType t 
=> Tensor v'1 Int64

a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

-> Tensor v'2 t

a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

-> Tensor v'3 Int64

a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

-> Tensor v'4 t

b: 2-D. A dense Matrix.

-> Tensor Build t

product

Multiply SparseTensor (of rank 2) A by dense matrix B.

No validity checking is performed on the indices of A. However, the following input format is recommended for optimal behavior:

if adjoint_a == false:
  A should be sorted in lexicographically increasing order. Use SparseReorder if you're not sure.
if adjoint_a == true:
  A should be sorted in order of increasing dimension 1 (i.e., "column major" order instead of "row major" order).

sparseTensorDenseMatMul'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 Int64

a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

-> Tensor v'2 t

a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

-> Tensor v'3 Int64

a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

-> Tensor v'4 t

b: 2-D. A dense Matrix.

-> Tensor Build t

product

sparseToDense

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 tindices

sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed.

-> Tensor v'2 tindices

output_shape: 1-D. Shape of the dense output tensor.

-> Tensor v'3 t

sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices.

-> Tensor v'4 t

default_value: Scalar value to set for indices not specified in sparse_indices.

-> Tensor Build t

dense: Dense output tensor of shape output_shape.

Converts a sparse representation into a dense tensor.

Builds an array dense with shape output_shape such that

```prettyprint
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```

All other values in dense are set to default_value. If sparse_values is a scalar, all sparse indices are set to this single value.

Indices should be sorted in lexicographic order, and indices must not contain any repeats. If validate_indices is true, these properties are checked during execution.
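
An untested end-to-end sketch (qualifiers as in the earlier examples) densifying a `[2, 3]` SparseTensor:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = TF.runSession $ do
    -- place 10 at [0,1] and 20 at [1,2] of a [2,3] dense output
    let indices  = Ops.constant (TF.Shape [2, 2]) [0, 1, 1, 2 :: Int32]
        outShape = Ops.vector [2, 3 :: Int32]
        values   = Ops.vector [10, 20 :: Float]
        dflt     = Ops.scalar (0 :: Float)
    dense <- TF.run (CoreOps.sparseToDense indices outShape values dflt)
    liftIO $ print (dense :: V.Vector Float)  -- expected [0,10,0, 0,0,20]
```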

sparseToDense'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 tindices

sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed.

-> Tensor v'2 tindices

output_shape: 1-D. Shape of the dense output tensor.

-> Tensor v'3 t

sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices.

-> Tensor v'4 t

default_value: Scalar value to set for indices not specified in sparse_indices.

-> Tensor Build t

dense: Dense output tensor of shape output_shape.

sparseToSparseSetOperation

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> Tensor v'1 Int64

set1_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major order.

-> Tensor v'2 t

set1_values: 1D Tensor, values of a SparseTensor. Must be in row-major order.

-> Tensor v'3 Int64

set1_shape: 1D Tensor, shape of a SparseTensor. `set1_shape[0...n-1]` must be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the max set size across `0...n-1` dimensions.

-> Tensor v'4 Int64

set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major order.

-> Tensor v'5 t

set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major order.

-> Tensor v'6 Int64

set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the max set size across `0...n-1` dimensions.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(result_indices, result_values, result_shape)

  • result_indices: 2D indices of a SparseTensor.
  • result_values: 1D values of a SparseTensor.
  • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` is the max result set size across all `0...n-1` dimensions.

Applies set operation along last dimension of 2 SparseTensor inputs.

See SetOperationOp::SetOperationFromContext for values of set_operation.

If validate_indices is True, SparseToSparseSetOperation validates the order and range of set1 and set2 indices.

Input set1 is a SparseTensor represented by set1_indices, set1_values, and set1_shape. For set1 ranked n, 1st `n-1` dimensions must be the same as set2. Dimension n contains values in a set, duplicates are allowed but ignored.

Input set2 is a SparseTensor represented by set2_indices, set2_values, and set2_shape. For set2 ranked n, 1st `n-1` dimensions must be the same as set1. Dimension n contains values in a set, duplicates are allowed but ignored.

If validate_indices is True, this op validates the order and range of set1 and set2 indices.

Output result is a SparseTensor represented by result_indices, result_values, and result_shape. For set1 and set2 ranked n, this has rank n and the same 1st `n-1` dimensions as set1 and set2. The nth dimension contains the result of set_operation applied to the corresponding `[0...n-1]` dimension of set.

sparseToSparseSetOperation'

Arguments

:: OneOf `[ByteString, Int16, Int32, Int64, Int8, Word16, Word8]` t 
=> OpParams 
-> Tensor v'1 Int64

set1_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major order.

-> Tensor v'2 t

set1_values: 1D Tensor, values of a SparseTensor. Must be in row-major order.

-> Tensor v'3 Int64

set1_shape: 1D Tensor, shape of a SparseTensor. `set1_shape[0...n-1]` must be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the max set size across `0...n-1` dimensions.

-> Tensor v'4 Int64

set2_indices: 2D Tensor, indices of a SparseTensor. Must be in row-major order.

-> Tensor v'5 t

set2_values: 1D Tensor, values of a SparseTensor. Must be in row-major order.

-> Tensor v'6 Int64

set2_shape: 1D Tensor, shape of a SparseTensor. `set2_shape[0...n-1]` must be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the max set size across `0...n-1` dimensions.

-> (Tensor Build Int64, Tensor Build t, Tensor Build Int64)

(result_indices, result_values, result_shape)

  • result_indices: 2D indices of a SparseTensor.
  • result_values: 1D values of a SparseTensor.
  • result_shape: 1D Tensor shape of a SparseTensor. `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of set1 and set2, `result_shape[n]` is the max result set size across all `0...n-1` dimensions.

split

Arguments

:: TensorType t 
=> Int64

num_split: The number of ways to split. Must evenly divide `value.shape[split_dim]`.

-> Tensor v'1 Int32

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`.

-> Tensor v'2 t

value: The tensor to split.

-> [Tensor Build t]

output: They are identically shaped tensors, whose shape matches that of value except along split_dim, where their sizes are `values.shape[split_dim] / num_split`.

Splits a tensor into num_split tensors along one dimension.

split'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

num_split: The number of ways to split. Must evenly divide `value.shape[split_dim]`.

-> Tensor v'1 Int32

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`.

-> Tensor v'2 t

value: The tensor to split.

-> [Tensor Build t]

output: They are identically shaped tensors, whose shape matches that of value except along split_dim, where their sizes are `values.shape[split_dim] / num_split`.

splitV

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tlen) 
=> Int64

num_split

-> Tensor v'1 t

value: The tensor to split.

-> Tensor v'2 tlen

size_splits: list containing the sizes of each output tensor along the split dimension. Must sum to the dimension of value along split_dim. Can contain one -1 indicating that dimension is to be inferred.

-> Tensor v'3 Int32

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`.

-> [Tensor Build t]

output: Tensors whose shape matches that of value except along split_dim, where their sizes are `size_splits[i]`.

Splits a tensor into num_split tensors along one dimension.

splitV'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tlen) 
=> OpParams 
-> Int64

num_split

-> Tensor v'1 t

value: The tensor to split.

-> Tensor v'2 tlen

size_splits: list containing the sizes of each output tensor along the split dimension. Must sum to the dimension of value along split_dim. Can contain one -1 indicating that dimension is to be inferred.

-> Tensor v'3 Int32

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`.

-> [Tensor Build t]

output: Tensors whose shape matches that of value except along split_dim, where their sizes are `size_splits[i]`.

sqrt

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes square root of x element-wise.

I.e., \(y = \sqrt{x} = x^{1/2}\).

sqrt'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

sqrtGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Computes the gradient for the sqrt of x wrt its input.

Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and dy is the corresponding input gradient.
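
This is just the chain rule applied to \(y = \sqrt{x}\):

\[
\frac{\partial y}{\partial x} = \frac{1}{2\sqrt{x}} = \frac{0.5}{y},
\qquad\text{so}\qquad
\mathit{grad} = \mathit{dy} \cdot \frac{0.5}{y}.
\]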

sqrtGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

square

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes square of x element-wise.

I.e., \(y = x * x = x^2\).

squaredDifference

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns (x - y)(x - y) element-wise.

*NOTE*: SquaredDifference supports broadcasting. More about broadcasting here

squeeze

Arguments

:: TensorType t 
=> Tensor v'1 t

input: The input to squeeze.

-> Tensor Build t

output: Contains the same data as input, but has one or more dimensions of size 1 removed.

Removes dimensions of size 1 from the shape of a tensor.

Given a tensor input, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying squeeze_dims.

For example:

```prettyprint
# t is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```

Or, to remove specific size 1 dimensions:

```prettyprint
# t is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```
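
In the Haskell bindings, squeeze_dims is an op attribute, so the primed variant with OpParams is the natural way to set it. A rough sketch, assuming opAttr is available via TensorFlow.Core and that the underlying attribute name is "squeeze_dims" as in the TensorFlow op definition:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int64)
import Lens.Family2 ((.~))
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.GenOps.Core as TF (squeeze')

-- Squeeze only dimensions 2 and 4 of a [1, 2, 1, 3, 1, 1] tensor,
-- giving shape [1, 2, 3, 1], by setting the squeeze_dims attribute.
main :: IO ()
main = TF.runSession $ do
    let t = TF.constant (TF.Shape [1, 2, 1, 3, 1, 1]) [1 .. 6 :: Float]
        squeezed = TF.squeeze' (TF.opAttr "squeeze_dims" .~ ([2, 4] :: [Int64])) t
    v <- TF.run squeezed
    liftIO $ print (v :: V.Vector Float)
```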

squeeze'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input: The input to squeeze.

-> Tensor Build t

output: Contains the same data as input, but has one or more dimensions of size 1 removed.

stack

Arguments

:: MonadBuild m' 
=> DataType

elem_type: The type of the elements on the stack.

-> m' (Tensor Ref ByteString)

handle: The handle to the stack.

A stack that produces elements in first-in last-out order.

stack'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

elem_type: The type of the elements on the stack.

-> m' (Tensor Ref ByteString)

handle: The handle to the stack.

stackClose

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle: The handle to a stack.

-> m' ControlNode 

Delete the stack from its resource container.

stackClose'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a stack.

-> m' ControlNode 

stackPop

Arguments

:: (MonadBuild m', TensorType elem_type) 
=> Tensor Ref ByteString

handle: The handle to a stack.

-> m' (Tensor Value elem_type)

elem: The tensor that is popped from the top of the stack.

Pop the element at the top of the stack.

stackPop'

Arguments

:: (MonadBuild m', TensorType elem_type) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a stack.

-> m' (Tensor Value elem_type)

elem: The tensor that is popped from the top of the stack.

stackPush

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref ByteString

handle: The handle to a stack.

-> Tensor v'2 t

elem: The tensor to be pushed onto the stack.

-> m' (Tensor Value t)

output: The same tensor as the input elem.

Push an element onto the stack.

stackPush'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref ByteString

handle: The handle to a stack.

-> Tensor v'2 t

elem: The tensor to be pushed onto the stack.

-> m' (Tensor Value t)

output: The same tensor as the input elem.
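
Tying the stack ops together, a rough sketch of a push followed by a pop across run calls. It assumes the DataType enum is importable from the generated proto module (Proto.Tensorflow.Core.Framework.Types) and that run_ is available for executing the push:

```haskell
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (vector)
import qualified TensorFlow.GenOps.Core as TF (stack, stackPop, stackPush)

-- Push a tensor onto a stack resource, then pop it back (first-in last-out).
main :: IO ()
main = TF.runSession $ do
    handle <- TF.stack DT_FLOAT
    pushed <- TF.stackPush handle (TF.vector [1, 2, 3 :: Float])
    TF.run_ pushed                        -- executes the push
    popped <- TF.stackPop handle
    v <- TF.run popped
    liftIO $ print (v :: V.Vector Float)  -- [1.0, 2.0, 3.0]
```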

stage

Arguments

:: (MonadBuild m', TensorTypes dtypes) 
=> TensorList v'1 dtypes

values: a list of tensors

-> m' ControlNode 

Stage values, similar to a lightweight Enqueue.

The basic functionality of this Op is similar to a queue with many fewer capabilities and options. This Op is optimized for performance.

stage'

Arguments

:: (MonadBuild m', TensorTypes dtypes) 
=> OpParams 
-> TensorList v'1 dtypes

values: a list of tensors

-> m' ControlNode 

stopGradient

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor Build t

output

Stops gradient computation.

When executed in a graph, this op outputs its input tensor as-is.

When building ops to compute gradients, this op prevents the contribution of its inputs from being taken into account. Normally, the gradient generator adds ops to a graph to compute the derivatives of a specified loss by recursively finding out inputs that contributed to its computation. If you insert this op in the graph, its inputs are masked from the gradient generator. They are not taken into account for computing gradients.

This is useful any time you want to compute a value with TensorFlow but need to pretend that the value was a constant. Some examples include (see the sketch after the list):

  • The *EM* algorithm where the *M-step* should not involve backpropagation through the output of the *E-step*.
  • Contrastive divergence training of Boltzmann machines where, when differentiating the energy function, the training must not backpropagate through the graph that generated the samples from the model.
  • Adversarial training, where no backprop should happen through the adversarial example generation process.
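
A minimal sketch, assuming the usual TensorFlow.Core and TensorFlow.Ops entry points. It only demonstrates the forward pass, which is the identity; the gradient masking takes effect when gradients are later built over the graph:

```haskell
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (vector)
import qualified TensorFlow.GenOps.Core as TF (stopGradient)

-- In the forward pass stopGradient is the identity; its only effect is to
-- mask its input from the gradient generator when gradients are built.
main :: IO ()
main = TF.runSession $ do
    let x = TF.vector [1, 2, 3 :: Float]
    v <- TF.run (TF.stopGradient x)
    liftIO $ print (v :: V.Vector Float)  -- [1.0, 2.0, 3.0]
```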

stopGradient'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor Build t

output

stridedSlice

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` index) 
=> Tensor v'1 t

input

-> Tensor v'2 index

begin: `begin[k]` specifies the offset into the kth range specification. The exact dimension this corresponds to will be determined by context. Out-of-bounds values will be silently clamped. If the kth bit of begin_mask is set then `begin[k]` is ignored and the full range of the appropriate dimension is used instead. Negative values cause indexing to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.

-> Tensor v'3 index

end: `end[i]` is like begin with the exception that end_mask is used to determine full ranges.

-> Tensor v'4 index

strides: `strides[i]` specifies the increment in the ith specification after extracting a given element. Negative indices will reverse the original order. Out-of-range values are clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`.

-> Tensor Build t

output

Return a strided slice from input.

Note, most python users will want to use the Python `Tensor.__getitem__` or `Variable.__getitem__` rather than this op directly.

The goal of this op is to produce a new tensor with a subset of the elements from the n dimensional input tensor. The subset is chosen using a sequence of m sparse range specifications encoded into the arguments of this function. [...] receive 0, 0, and 1, respectively. The appropriate bits in begin_mask and end_mask are also set.

  • *Requirements*: `0 != strides[i] for i in [0, m)` `ellipsis_mask must be a power of two (only one ellipsis)`
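
A minimal sketch of a strided slice, assuming the usual TensorFlow.Core and TensorFlow.Ops entry points (runSession, run, constant, vector); masks are left at their defaults:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant, vector)
import qualified TensorFlow.GenOps.Core as TF (stridedSlice)

-- Slice t[0:2, 0:3:2] out of a 3x3 matrix: rows 0..1, every second column.
main :: IO ()
main = TF.runSession $ do
    let t       = TF.constant (TF.Shape [3, 3]) [1 .. 9 :: Float]
        begin   = TF.vector [0, 0 :: Int32]
        end     = TF.vector [2, 3 :: Int32]
        strides = TF.vector [1, 2 :: Int32]
    v <- TF.run (TF.stridedSlice t begin end strides)
    liftIO $ print (v :: V.Vector Float)  -- [1.0, 3.0, 4.0, 6.0] (shape [2, 2])
```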


stridedSlice'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` index) 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 index

begin: `begin[k]` specifies the offset into the kth range specification. The exact dimension this corresponds to will be determined by context. Out-of-bounds values will be silently clamped. If the kth bit of begin_mask is set then `begin[k]` is ignored and the full range of the appropriate dimension is used instead. Negative values cause indexing to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.

-> Tensor v'3 index

end: `end[i]` is like begin with the exception that end_mask is used to determine full ranges.

-> Tensor v'4 index

strides: `strides[i]` specifies the increment in the ith specification after extracting a given element. Negative indices will reverse the original order. Out-of-range values are clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`.

-> Tensor Build t

output

stridedSliceAssign

Arguments

:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` index) 
=> Tensor Ref t

ref

-> Tensor v'2 index

begin

-> Tensor v'3 index

end

-> Tensor v'4 index

strides

-> Tensor v'5 t

value

-> m' (Tensor Ref t)

output_ref

Assign value to the sliced l-value reference of ref.

The values of value are assigned to the positions in the variable ref that are selected by the slice parameters. The slice parameters begin, end, strides, etc. work exactly as in StridedSlice.

NOTE this op currently does not support broadcasting and so value's shape must be exactly the shape produced by the slice of ref.
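
A rough sketch of a sliced assignment into a variable, assuming initializedVariable from TensorFlow.Ops and run_/run from TensorFlow.Core behave as in the rest of these bindings:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant, initializedVariable, vector)
import qualified TensorFlow.GenOps.Core as TF (stridedSliceAssign)

-- Overwrite elements 1..2 of a length-4 variable, as in ref[1:3] = [5, 6].
main :: IO ()
main = TF.runSession $ do
    var <- TF.initializedVariable (TF.constant (TF.Shape [4]) [0, 0, 0, 0 :: Float])
    assigned <- TF.stridedSliceAssign var
                    (TF.vector [1 :: Int32])    -- begin
                    (TF.vector [3 :: Int32])    -- end
                    (TF.vector [1 :: Int32])    -- strides
                    (TF.vector [5, 6 :: Float]) -- value
    TF.run_ assigned                            -- executes the assignment
    v <- TF.run var
    liftIO $ print (v :: V.Vector Float)        -- [0.0, 5.0, 6.0, 0.0]
```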

stridedSliceAssign'

Arguments

:: (MonadBuild m', TensorType t, OneOf `[Int32, Int64]` index) 
=> OpParams 
-> Tensor Ref t

ref

-> Tensor v'2 index

begin

-> Tensor v'3 index

end

-> Tensor v'4 index

strides

-> Tensor v'5 t

value

-> m' (Tensor Ref t)

output_ref

stridedSliceGrad

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` index) 
=> Tensor v'1 index

shape

-> Tensor v'2 index

begin

-> Tensor v'3 index

end

-> Tensor v'4 index

strides

-> Tensor v'5 t

dy

-> Tensor Build t

output

Returns the gradient of StridedSlice.

Since StridedSlice cuts out pieces of its input which is size shape, its gradient will have the same shape (which is passed here as shape). The gradient will be zero in any element that the slice does not select.

Arguments are the same as StridedSliceGrad with the exception that dy is the input gradient to be propagated and shape is the shape of StridedSlice's input.

stridedSliceGrad'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` index) 
=> OpParams 
-> Tensor v'1 index

shape

-> Tensor v'2 index

begin

-> Tensor v'3 index

end

-> Tensor v'4 index

strides

-> Tensor v'5 t

dy

-> Tensor Build t

output

stringJoin

Arguments

:: [Tensor v'1 ByteString]

inputs: A list of string tensors. The tensors must all have the same shape, or be scalars. Scalars may be mixed in; these will be broadcast to the shape of non-scalar inputs.

-> Tensor Build ByteString

output

Joins the strings in the given list of string tensors into one tensor;

with the given separator (default is an empty separator).
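
The separator is an op attribute, so it is set through the primed variant. A minimal sketch, assuming opAttr is available via TensorFlow.Core and that the attribute is named "separator" as in the TensorFlow op definition:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import Data.ByteString (ByteString)
import Lens.Family2 ((.~))
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (scalar)
import qualified TensorFlow.GenOps.Core as TF (stringJoin')

-- Join two scalar strings with a space, via the separator attribute.
main :: IO ()
main = TF.runSession $ do
    let joined = TF.stringJoin'
                     (TF.opAttr "separator" .~ (" " :: ByteString))
                     [TF.scalar ("hello" :: ByteString), TF.scalar "world"]
    v <- TF.run joined
    liftIO $ print (v :: V.Vector ByteString)  -- ["hello world"]
```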

stringJoin'

Arguments

:: OpParams 
-> [Tensor v'1 ByteString]

inputs: A list of string tensors. The tensors must all have the same shape, or be scalars. Scalars may be mixed in; these will be broadcast to the shape of non-scalar inputs.

-> Tensor Build ByteString

output

stringSplit

Arguments

:: Tensor v'1 ByteString

input: 1-D. Strings to split.

-> Tensor v'2 ByteString

delimiter: 0-D. Delimiter characters (bytes), or empty string.

-> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)

(indices, values, shape)

  • indices: A dense matrix of int64 representing the indices of the sparse tensor.
  • values: A vector of strings corresponding to the split values.
  • shape: a length-2 vector of int64 representing the shape of the sparse tensor, where the first value is N and the second value is the maximum number of tokens in a single input entry.

Split elements of input based on delimiter into a SparseTensor.

Let N be the size of source (typically N will be the batch size). Split each element of input based on delimiter and return a SparseTensor containing the split tokens. Empty tokens are ignored.

delimiter can be empty, or a string of split characters. If delimiter is an empty string, each element of input is split into individual single-byte character strings, including splitting of UTF-8 multibyte sequences. Otherwise every character of delimiter is a potential split point.

For example: if N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output will be

indices = [0, 0;
           0, 1;
           1, 0;
           1, 1;
           1, 2]
shape = [2, 3]
values = ['hello', 'world', 'a', 'b', 'c']
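
The same example driven from the Haskell bindings, as a minimal sketch. It assumes the usual TensorFlow.Core and TensorFlow.Ops entry points and that run supports fetching a 3-tuple of tensors; the index tensor comes back as a flattened vector:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import Data.ByteString (ByteString)
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (scalar, vector)
import qualified TensorFlow.GenOps.Core as TF (stringSplit)

-- Split two strings on spaces into a sparse (indices, values, shape) triple.
main :: IO ()
main = TF.runSession $ do
    let input = TF.vector ["hello world", "a b c" :: ByteString]
        delim = TF.scalar (" " :: ByteString)
    (indices, values, shape) <- TF.run (TF.stringSplit input delim)
    liftIO $ do
        print (indices :: V.Vector Int64)       -- flattened [0,0, 0,1, 1,0, 1,1, 1,2]
        print (values  :: V.Vector ByteString)  -- ["hello","world","a","b","c"]
        print (shape   :: V.Vector Int64)       -- [2, 3]
```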

rank Source

Arguments

:: TensorType t 
=> Tensor v1 t

input

-> Tensor Value Int32

output

Returns the rank of a tensor.

This operation returns an integer representing the rank of input.

For example:

```prettyprint - # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - # shape of tensor t is [2, 2, 3] - rank(t) ==> 3 - ```

  • *Note**: The rank of a tensor is not the same as the rank of a matrix. The rank - of a tensor is the number of indices required to uniquely select each element - of the tensor. Rank is also known as "order", "degree", or "ndims."

reciprocal Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes the reciprocal of x element-wise.

I.e., \(y = 1 / x\).

reverseSequence Source

Arguments

:: (TensorType t, TensorType tlen, OneOf `[Int32, Int64]` tlen) 
=> Int64

seq_dim: The dimension which is partially reversed.

-> Tensor v1 t

input: The input to reverse.

-> Tensor v2 tlen

seq_lengths: 1-D with length `input.dims(batch_dim)` and - `max(seq_lengths) < input.dims(seq_dim)`

-> Tensor Value t

output: The partially reversed input. It has the same shape as input.

Reverses variable length slices.

This op first slices input along the dimension batch_dim, and for each - slice i, reverses the first `seq_lengths[i]` elements along - the dimension seq_dim.

The elements of seq_lengths must obey `seq_lengths[i] < input.dims[seq_dim]`, - and seq_lengths must be a vector of length `input.dims[batch_dim]`.

The output slice i along dimension batch_dim is then given by input - slice i, with the first `seq_lengths[i]` slices along dimension - seq_dim reversed.

For example:

```prettyprint - # Given this: - batch_dim = 0 - seq_dim = 1 - input.dims = (4, 8, ...) - seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths: - output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] - output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] - output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] - output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]

# while entries past seq_lens are copied through: - output[0, 7:, :, ...] = input[0, 7:, :, ...] - output[1, 2:, :, ...] = input[1, 2:, :, ...] - output[2, 3:, :, ...] = input[2, 3:, :, ...] - output[3, 2:, :, ...] = input[3, 2:, :, ...] - ```

In contrast, if:

```prettyprint - # Given this: - batch_dim = 2 - seq_dim = 0 - input.dims = (8, ?, 4, ...) - seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths: - output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] - output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] - output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] - output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]

# while entries past seq_lens are copied through: - output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] - output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] - output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] - output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] - ```

biasAddGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

out_backprop: Any number of dimensions.

-> Tensor Value t

output: 1-D with size the feature dimension of out_backprop.

The backward operation for BiasAdd on the "bias" tensor.

It accumulates all the values from out_backprop into the feature dimension. - For NHWC data format, the feature dimension is the last. For NCHW data format, - the feature dimension is the third-to-last.

addSparseToTensorsMap Source

Arguments

:: TensorType t 
=> Tensor v1 Int64

sparse_indices: 2-D. The indices of the SparseTensor.

-> Tensor v2 t

sparse_values: 1-D. The values of the SparseTensor.

-> Tensor v3 Int64

sparse_shape: 1-D. The shape of the SparseTensor.

-> Build (Tensor Value Int64)

sparse_handle: 0-D. The handle of the SparseTensor now stored in the - SparseTensorsMap.

Add a SparseTensor to a SparseTensorsMap return its handle.

A SparseTensor is represented by three tensors: sparse_indices, - sparse_values, and sparse_shape.

This operator takes the given SparseTensor and adds it to a container - object (a SparseTensorsMap). A unique key within this container is generated - in the form of an int64, and this is the value that is returned.

The SparseTensor can then be read out as part of a minibatch by passing - the key as a vector element to TakeManySparseFromTensorsMap. To ensure - the correct SparseTensorsMap is accessed, ensure that the same - container and shared_name are passed to that Op. If no shared_name - is provided here, instead use the *name* of the Operation created by calling - AddSparseToTensorsMap as the shared_name passed to - TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

tan Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes tan of x element-wise.

sparseReduceSumSparse Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

-> Tensor v2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v4 Int32

reduction_axes: 1-D. Length-K vector containing the reduction axes.

-> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)

(output_indices, output_values, output_shape)

  • output_indices
  • output_values
  • output_shape

Computes the sum of elements across dimensions of a SparseTensor.

This Op takes a SparseTensor and is the sparse counterpart to - `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a - SparseTensor.

Reduces sp_input along the dimensions given in reduction_axes. Unless - keep_dims is true, the rank of the tensor is reduced by 1 for each entry in - reduction_axes. If keep_dims is true, the reduced dimensions are retained - with length 1.

If reduction_axes has no entries, all dimensions are reduced, and a tensor - with a single element is returned. Additionally, the axes can be negative, - which are interpreted according to the indexing rules in Python.

shapeN Source

Arguments

:: (TensorType t, TensorType out_type, OneOf `[Int32, Int64]` out_type) 
=> [Tensor v1 t]

input

-> [Tensor Value out_type]

output

Returns shape of tensors.

This operation returns N 1-D integer tensors representing shape of `input[i]s`.

shape Source

Arguments

:: (TensorType t, TensorType out_type, OneOf `[Int32, Int64]` out_type) 
=> Tensor v1 t

input

-> Tensor Value out_type

output

Returns the shape of a tensor.

This operation returns a 1-D integer tensor representing the shape of input.

For example:

```prettyprint - # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - shape(t) ==> [2, 2, 3] - ```

unique Source

Arguments

:: (TensorType t, TensorType out_idx, OneOf `[Int32, Int64]` out_idx) 
=> Tensor v1 t

x: 1-D.

-> (Tensor Value t, Tensor Value out_idx)

(y, idx)

  • y: 1-D.
  • idx: 1-D.

Finds unique elements in a 1-D tensor.

This operation returns a tensor y containing all of the unique elements of x - sorted in the same order that they occur in x. This operation also returns a - tensor idx the same size as x that contains the index of each value of x - in the unique output y. In other words:

`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

For example:

```prettyprint - # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] - y, idx = unique(x) - y ==> [1, 2, 4, 7, 8] - idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - ```

truncatedNormal Source

Arguments

:: (TensorType dtype, OneOf `[Word16, Double, Float]` dtype, TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 t

shape: The shape of the output tensor.

-> Build (Tensor Value dtype)

output: A tensor of the specified shape filled with random truncated normal - values.

Outputs random values from a truncated normal distribution.

The generated values follow a normal distribution with mean 0 and standard - deviation 1, except that values whose magnitude is more than 2 standard - deviations from the mean are dropped and re-picked.

invertPermutation Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 t

x: 1-D.

-> Tensor Value t

y: 1-D.

Computes the inverse permutation of a tensor.

This operation computes the inverse of an index permutation. It takes a 1-D - integer tensor x, which represents the indices of a zero-based array, and - swaps each value with its index position. In other words, for an output tensor - y and an input tensor x, this operation computes the following:

`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`

The values must include 0. There can be no duplicate values or negative values.

For example:

```prettyprint - # tensor x is [3, 4, 0, 2, 1] - invert_permutation(x) ==> [2, 4, 3, 0, 1] - ```

checkNumerics Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

tensor

-> Tensor Value t

output

Checks a tensor for NaN and Inf values.

When run, reports an InvalidArgument error if tensor has any values - that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.

uniformCandidateSampler Source

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled - candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

-> Tensor v1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is - the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing - the number of times each candidate is expected to occur in a batch - of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled - candidate representing the number of times the candidate is expected - to occur in a batch of sampled candidates. If unique=true, then this is a - probability.

Generates labels for candidate sampling with a uniform distribution.

See explanations of candidate sampling and the data formats at - go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the - possibility of efficient dense matrix multiplication. The disadvantage is that - the sampled candidates must be chosen independently of the context and of the - true labels.

gather Source

Arguments

:: (TensorType tparams, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 tparams

params

-> Tensor v2 tindices

indices

-> Tensor Value tparams

output

Gather slices from params according to indices.

indices must be an integer tensor of any dimension (usually 0-D or 1-D). - Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

```python - # Scalar indices - output[:, ..., :] = params[indices, :, ... :]

# Vector indices - output[i, :, ..., :] = params[indices[i], :, ... :]

# Higher rank indices - output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] - ```

If indices is a permutation and `len(indices) == params.shape[0]` then - this operation will permute params accordingly.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/Gather.png" alt - /div

const Source

Arguments

:: TensorType dtype 
=> Tensor Value dtype

output

Returns a constant tensor.

fill Source

Arguments

:: TensorType t 
=> Tensor v1 Int32

dims: 1-D. Represents the shape of the output tensor.

-> Tensor v2 t

value: 0-D (scalar). Value to fill the returned tensor.

compatibility(numpy) - Equivalent to np.full - end_compatibility

-> Tensor Value t

output

Creates a tensor filled with a scalar value.

This operation creates a tensor of shape dims and fills it with value.

For example:

```prettyprint - # Output tensor has shape [2, 3]. - fill([2, 3], 9) ==> [[9, 9, 9] - [9, 9, 9]] - ```

editDistance Source

Arguments

:: TensorType t 
=> Tensor v1 Int64

hypothesis_indices: The indices of the hypothesis list SparseTensor. - This is an N x R int64 matrix.

-> Tensor v2 t

hypothesis_values: The values of the hypothesis list SparseTensor. - This is an N-length vector.

-> Tensor v3 Int64

hypothesis_shape: The shape of the hypothesis list SparseTensor. - This is an R-length vector.

-> Tensor v4 Int64

truth_indices: The indices of the truth list SparseTensor. - This is an M x R int64 matrix.

-> Tensor v5 t

truth_values: The values of the truth list SparseTensor. - This is an M-length vector.

-> Tensor v6 Int64

truth_shape: truth indices, vector.

-> Tensor Value Float

output: A dense float tensor with rank R - 1.

For the example input:

// hypothesis represents a 2x1 matrix with variable-length values: - // (0,0) = ["a"] - // (1,0) = ["b"] - hypothesis_indices = [[0, 0, 0], - [1, 0, 0]] - hypothesis_values = ["a", "b"] - hypothesis_shape = [2, 1, 1]

// truth represents a 2x2 matrix with variable-length values: - // (0,0) = [] - // (0,1) = ["a"] - // (1,0) = ["b", "c"] - // (1,1) = ["a"] - truth_indices = [[0, 1, 0], - [1, 0, 0], - [1, 0, 1], - [1, 1, 0]] - truth_values = ["a", "b", "c", "a"] - truth_shape = [2, 2, 2] - normalize = true

The output will be:

// output is a 2x2 matrix with edit distances normalized by truth lengths. - output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis - [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis

Computes the (possibly normalized) Levenshtein Edit Distance.

The inputs are variable-length sequences provided by SparseTensors - (hypothesis_indices, hypothesis_values, hypothesis_shape) - and - (truth_indices, truth_values, truth_shape).

The inputs are:

reverse Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

tensor: Up to 8-D.

-> Tensor v2 Bool

dims: 1-D. The dimensions to reverse.

-> Tensor Value t

output: The same shape as tensor.

Reverses specific dimensions of a tensor.

Given a tensor, and a bool tensor dims representing the dimensions - of tensor, this operation reverses each dimension i of tensor where - `dims[i]` is True.

tensor can have up to 8 dimensions. The number of dimensions - of tensor must equal the number of elements in dims. In other words:

`rank(tensor) = size(dims)`

For example:

```prettyprint - # tensor t is [[[[ 0, 1, 2, 3], - # [ 4, 5, 6, 7], - # [ 8, 9, 10, 11]], - # [[12, 13, 14, 15], - # [16, 17, 18, 19], - # [20, 21, 22, 23]]]] - # tensor t shape is [1, 2, 3, 4]

# dims is [False, False, False, True] - reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - [ 7, 6, 5, 4], - [ 11, 10, 9, 8]], - [[15, 14, 13, 12], - [19, 18, 17, 16], - [23, 22, 21, 20]]]]

# dims is [False, True, False, False] - reverse(t, dims) ==> [[[[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23] - [[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]]]

# dims is [False, False, True, False] - reverse(t, dims) ==> [[[[8, 9, 10, 11], - [4, 5, 6, 7], - [0, 1, 2, 3]] - [[20, 21, 22, 23], - [16, 17, 18, 19], - [12, 13, 14, 15]]]] - ```

matrixSetDiag Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: Rank `k+1`, where `k >= 1`.

-> Tensor v2 t

diagonal: Rank k, where `k >= 1`.

-> Tensor Value t

output: Rank `k+1`, with `output.shape = input.shape`.

Returns a batched matrix tensor with new batched diagonal values.

Given input and diagonal, this operation returns a tensor with the - same shape and values as input, except for the main diagonal of the - innermost matrices. These will be overwritten by the values in diagonal.

The output is computed as follows:

Assume input has `k+1` dimensions `[I, J, K, ..., M, N]` and diagonal has - k dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a - tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:

  • `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
  • `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.

matrixDiag Source

Arguments

:: TensorType t 
=> Tensor v1 t

diagonal: Rank k, where `k >= 1`.

-> Tensor Value t

output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.

Returns a batched diagonal tensor with a given batched diagonal values.

Given a diagonal, this operation returns a tensor with the diagonal and - everything else padded with zeros. The diagonal is computed as follows:

Assume diagonal has k dimensions `[I, J, K, ..., N]`, then the output is a - tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:

`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.

For example:

```prettyprint - # diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]]

and diagonal.shape = (2, 4)

tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] - [0, 2, 0, 0] - [0, 0, 3, 0] - [0, 0, 0, 4]], - [[5, 0, 0, 0] - [0, 6, 0, 0] - [0, 0, 7, 0] - [0, 0, 0, 8]]]

which has shape (2, 4, 4) - ```

diag Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Double, Float]` t) 
=> Tensor v1 t

diagonal: Rank k tensor where k is at most 3.

-> Tensor Value t

output

Returns a diagonal tensor with a given diagonal values.

Given a diagonal, this operation returns a tensor with the diagonal and - everything else padded with zeros. The diagonal is computed as follows:

Assume diagonal has dimensions [D1,..., Dk], then the output is a tensor of - rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

For example:

```prettyprint - # diagonal is [1, 2, 3, 4] - tf.diag(diagonal) ==> [[1, 0, 0, 0] - [0, 2, 0, 0] - [0, 0, 3, 0] - [0, 0, 0, 4]] - ```

immutableConst Source

Arguments

:: TensorType dtype 
=> Shape

shape: Shape of the returned tensor.

-> Tensor Value dtype

tensor

Returns immutable tensor from memory region.

The current implementation memmaps the tensor from a file.

concat Source

Arguments

:: TensorType t 
=> Tensor v1 Int32

concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

-> [Tensor v2 t]

values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

-> Tensor Value t

output: A Tensor with the concatenation of values stacked along the - concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

Concatenates tensors along one dimension.

unpack Source

Arguments

:: TensorType t 
=> Int64

num

-> Tensor v1 t

value: 1-D or higher, with axis dimension size equal to num.

-> [Tensor Value t]

output: The list of tensors unpacked from value.

Unpacks a given dimension of a rank-R tensor into num rank-`(R-1)` tensors.

Unpacks num tensors from value by chipping it along the axis dimension. - For example, given a tensor of shape `(A, B, C, D)`;

If `axis == 0` then the i'th tensor in output is the slice `value[i, :, :, :]` - and each tensor in output will have shape `(B, C, D)`. (Note that the - dimension unpacked along is gone, unlike split).

If `axis == 1` then the i'th tensor in output is the slice `value[:, i, :, :]` - and each tensor in output will have shape `(A, C, D)`. - Etc.

This is the opposite of pack.

fact Source

Arguments

:: Tensor Value ByteString

fact

Output a fact about factorials.

abs Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes the absolute value of a tensor.

Given a tensor x, this operation returns a tensor containing the absolute - value of each element in x. For example, if x is an input element and y is - an output element, this operation computes \(y = |x|\).

softmax Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

logits: 2-D with shape `[batch_size, num_classes]`.

-> Tensor Value t

softmax: Same shape as logits.

Computes softmax activations.

For each batch i and class j we have

softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

reverseV2 Source

Arguments

:: (TensorType tidx, OneOf `[Int32, Int64]` tidx, TensorType t, OneOf `[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

tensor: Up to 8-D.

-> Tensor v2 tidx

axis: 1-D. The indices of the dimensions to reverse.

-> Tensor Value t

output: The same shape as tensor.

Reverses specific dimensions of a tensor.

Given a tensor, and a int32 tensor axis representing the set of - dimensions of tensor to reverse. This operation reverses each dimension - i for which there exists j s.t. `axis[j] == i`.

tensor can have up to 8 dimensions. The number of dimensions specified - in axis may be 0 or more entries. If an index is specified more than - once, a InvalidArgument error is raised.

For example:

```prettyprint - # tensor t is [[[[ 0, 1, 2, 3], - # [ 4, 5, 6, 7], - # [ 8, 9, 10, 11]], - # [[12, 13, 14, 15], - # [16, 17, 18, 19], - # [20, 21, 22, 23]]]] - # tensor t shape is [1, 2, 3, 4]

# dims is [3] or dims is -1 - reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - [ 7, 6, 5, 4], - [ 11, 10, 9, 8]], - [[15, 14, 13, 12], - [19, 18, 17, 16], - [23, 22, 21, 20]]]]

# dims is '[1]' (or dims is '[-3]') - reverse(t, dims) ==> [[[[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23] - [[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]]]

# dims is '[2]' (or dims is '[-2]') - reverse(t, dims) ==> [[[[8, 9, 10, 11], - [4, 5, 6, 7], - [0, 1, 2, 3]] - [[20, 21, 22, 23], - [16, 17, 18, 19], - [12, 13, 14, 15]]]] - ```

identity Source

Arguments

:: TensorType t 
=> Tensor v1 t

input

-> Tensor Value t

output

Return a tensor with the same shape and contents as the input tensor or value.

sparseAdd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType treal, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` treal) 
=> Tensor v1 Int64

a_indices: 2-D. The indices of the first SparseTensor, size `[nnz, ndims]` Matrix.

-> Tensor v2 t

a_values: 1-D. The values of the first SparseTensor, size `[nnz]` Vector.

-> Tensor v3 Int64

a_shape: 1-D. The shape of the first SparseTensor, size `[ndims]` Vector.

-> Tensor v4 Int64

b_indices: 2-D. The indices of the second SparseTensor, size `[nnz, ndims]` Matrix.

-> Tensor v5 t

b_values: 1-D. The values of the second SparseTensor, size `[nnz]` Vector.

-> Tensor v6 Int64

b_shape: 1-D. The shape of the second SparseTensor, size `[ndims]` Vector.

-> Tensor v7 treal

thresh: 0-D. The magnitude threshold that determines if an output value/index - pair takes space.

-> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)

(sum_indices, sum_values, sum_shape)

  • sum_indices
  • sum_values
  • sum_shape

Adds two SparseTensor objects to produce another SparseTensor.

The input SparseTensor objects' indices are assumed ordered in standard - lexicographic order. If this is not the case, before this step run - SparseReorder to restore index ordering.

By default, if two values sum to zero at some index, the output SparseTensor - would still include that particular location in its index, storing a zero in the - corresponding value slot. To override this, callers can specify thresh, - indicating that if the sum has a magnitude strictly smaller than thresh, its - corresponding value and index would then not be included. In particular, - `thresh == 0` (default) means everything is kept and actual thresholding happens - only for a positive value.

In the following shapes, nnz is the count after taking thresh into account.

sparseApplyCenteredRMSProp Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

mg: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v6 t

rho: Decay rate. Must be a scalar.

-> Tensor v7 t

momentum

-> Tensor v8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v9 t

grad: The gradient.

-> Tensor v10 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the centered RMSProp algorithm.

The centered RMSProp algorithm uses an estimate of the centered second moment - (i.e., the variance) for normalization, as opposed to regular RMSProp, which - uses the (uncentered) second moment. This often helps with training, but is - slightly more expensive in terms of computation and memory.

Note that in dense implementation of this algorithm, mg, ms, and mom will - update even if the grad is zero, but in this sparse implementation, mg, ms, - and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2 - mean_grad = decay * mean_grad + (1-decay) * gradient - Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad - mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - var <- var - mom

addN Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> [Tensor v1 t]

inputs: Must all be the same size and shape.

-> Tensor Value t

sum

Add all input tensors element wise.

concatOffset Source

Arguments

:: Tensor v1 Int32

concat_dim: The dimension along which to concatenate.

-> [Tensor v2 Int32]

shape: The N int32 vectors representing shape of tensors being concatenated.

-> [Tensor Value Int32]

offset: The N int32 vectors representing the starting offset - of input tensors within the concatenated output.

This is typically used by gradient computations for a concat operation.

Computes offsets of concat inputs within its output.

For example:

```prettyprint - # x is [2, 2, 7] - # y is [2, 3, 7] - # z is [2, 5, 7] - concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0] - ```

concatV2 Source

Arguments

:: (TensorType t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> [Tensor v1 t]

values: List of N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

-> Tensor v2 tidx

axis: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

-> Tensor Value t

output: A Tensor with the concatenation of values stacked along the - concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

Concatenates tensors along one dimension.

zerosLike Source

Arguments

:: TensorType t 
=> Tensor v1 t

x: a tensor of type T.

-> Tensor Value t

y: a tensor of the same shape and type as x but filled with zeros.

Returns a tensor of zeros with the same shape and type as x.

applyCenteredRMSProp Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

mg: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v6 t

rho: Decay rate. Must be a scalar.

-> Tensor v7 t

momentum

-> Tensor v8 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v9 t

grad: The gradient.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the centered RMSProp algorithm.

The centered RMSProp algorithm uses an estimate of the centered second moment - (i.e., the variance) for normalization, as opposed to regular RMSProp, which - uses the (uncentered) second moment. This often helps with training, but is - slightly more expensive in terms of computation and memory.

Note that in dense implementation of this algorithm, mg, ms, and mom will - update even if the grad is zero, but in this sparse implementation, mg, ms, - and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2 - mean_grad = decay * mean_grad + (1-decay) * gradient

Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

mg <- rho * mg_{t-1} + (1-rho) * grad - ms <- rho * ms_{t-1} + (1-rho) * grad * grad - mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) - var <- var - mom

applyRMSProp Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v5 t

rho: Decay rate. Must be a scalar.

-> Tensor v6 t

momentum

-> Tensor v7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v8 t

grad: The gradient.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the RMSProp algorithm.

Note that in dense implementation of this algorithm, ms and mom will - update even if the grad is zero, but in this sparse implementation, ms - and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2 - Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad - mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - var <- var - mom

assignAddVariableOp Source

Arguments

:: TensorType dtype 
=> ResourceHandle dtype

resource: handle to the resource in which to store the variable.

-> Tensor v2 dtype

value: the value by which the variable will be incremented.

-> Build ControlNode 

Adds a value to the current value of a variable.

Any ReadVariableOp which depends directly or indirectly on this assign is - guaranteed to see the incremented value or a subsequent newer one.

Outputs the incremented value, which can be used to totally order the - increments to this variable.

applyAdam Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

m: Should be from a Variable().

-> Tensor Ref t

v: Should be from a Variable().

-> Tensor v4 t

beta1_power: Must be a scalar.

-> Tensor v5 t

beta2_power: Must be a scalar.

-> Tensor v6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v7 t

beta1: Momentum factor. Must be a scalar.

-> Tensor v8 t

beta2: Momentum factor. Must be a scalar.

-> Tensor v9 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v10 t

grad: The gradient.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the Adam algorithm.

lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) - m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t - v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t - variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)

extractGlimpse Source

Arguments

:: Tensor v1 Float

input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.

-> Tensor v2 Int32

size: A 1-D tensor of 2 elements containing the size of the glimpses - to extract. The glimpse height must be specified first, following - by the glimpse width.

-> Tensor v3 Float

offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing - the x, y locations of the center of each window.

-> Tensor Value Float

glimpse: A tensor representing the glimpses `[batch_size, - glimpse_height, glimpse_width, channels]`.

Extracts a glimpse from the input tensor.

Returns a set of windows called glimpses extracted at location - offsets from the input tensor. If the windows only partially - overlaps the inputs, the non overlapping areas will be filled with - random noise.

The result is a 4-D tensor of shape `[batch_size, glimpse_height, - glimpse_width, channels]`. The channels and batch dimensions are the - same as that of the input tensor. The height and width of the output - windows are specified in the size parameter.

The argument normalized and centered controls how the windows are built:

  • If the coordinates are normalized but not centered, 0.0 and 1.0 - correspond to the minimum and maximum of each height and width - dimension.
  • If the coordinates are both normalized and centered, they range from
  • 1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper - left corner, the lower right corner is located at (1.0, 1.0) and the - center is at (0, 0).
  • If the coordinates are not normalized they are interpreted as - numbers of pixels.

sparseApplyMomentum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v3 t

lr: Learning rate. Must be a scalar.

-> Tensor v4 t

grad: The gradient.

-> Tensor v5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v6 t

momentum: Momentum. Must be a scalar.

-> Build (Tensor Ref t)

out: Same as "var".

Update relevant entries in '*var' and '*accum' according to the momentum scheme.

Set use_nesterov = True if you want to use Nesterov momentum.

That is for rows we have grad for, we update var and accum as follows:

accum = accum * momentum + grad - var -= lr * accum

applyMomentum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v4 t

grad: The gradient.

-> Tensor v5 t

momentum: Momentum. Must be a scalar.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the momentum scheme. Set use_nesterov = True if you

want to use Nesterov momentum.

accum = accum * momentum + grad - var -= lr * accum

fIFOQueue Source

Arguments

:: Build (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that produces elements in first-in first-out order.

sparseApplyFtrl Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

linear: Should be from a Variable().

-> Tensor v4 t

grad: The gradient.

-> Tensor v5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v6 t

lr: Scaling factor. Must be a scalar.

-> Tensor v7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v9 t

lr_power: Scaling factor. Must be a scalar.

-> Build (Tensor Ref t)

out: Same as "var".

Update relevant entries in '*var' according to the Ftrl-proximal scheme.

That is for rows we have grad for, we update var, accum and linear as follows: - accum_new = accum + grad * grad - linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var - quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 - accum = accum_new

sparseApplyAdagradDA Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

gradient_accumulator: Should be from a Variable().

-> Tensor Ref t

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v4 t

grad: The gradient.

-> Tensor v5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Tensor v6 t

lr: Learning rate. Must be a scalar.

-> Tensor v7 t

l1: L1 regularization. Must be a scalar.

-> Tensor v8 t

l2: L2 regularization. Must be a scalar.

-> Tensor v9 Int64

global_step: Training step number. Must be a scalar.

-> Build (Tensor Ref t)

out: Same as "var".

Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

floorDiv Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns x // y element-wise.

  • NOTE*: FloorDiv supports broadcasting. More about broadcasting - here

applyAdagradDA Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

gradient_accumulator: Should be from a Variable().

-> Tensor Ref t

gradient_squared_accumulator: Should be from a Variable().

-> Tensor v4 t

grad: The gradient.

-> Tensor v5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v8 Int64

global_step: Training step number. Must be a scalar.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the proximal adagrad scheme.

applyAdagrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v4 t

grad: The gradient.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the adagrad scheme.

accum += grad * grad - var -= lr * grad * (1 / sqrt(accum))

sigmoidGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Computes the gradient of the sigmoid of x wrt its input.

Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and - dy is the corresponding input gradient.

applyAdadelta Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

accum_update: Should be from a Variable().

-> Tensor v4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v5 t

rho: Decay factor. Must be a scalar.

-> Tensor v6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v7 t

grad: The gradient.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the adadelta scheme.

accum = rho() * accum + (1 - rho()) * grad.square(); - update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; - update_accum = rho() * update_accum + (1 - rho()) * update.square(); - var -= update;

sparseApplyProximalGradientDescent Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor v2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v5 t

grad: The gradient.

-> Tensor v6 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Build (Tensor Ref t)

out: Same as "var".

Sparse update '*var' as FOBOS algorithm with fixed learning rate.

That is for rows we have grad for, we update var as follows: - prox_v = var - alpha * grad - var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

applyProximalGradientDescent Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor v2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v3 t

l1: L1 regularization. Must be a scalar.

-> Tensor v4 t

l2: L2 regularization. Must be a scalar.

-> Tensor v5 t

delta: The change.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' as FOBOS algorithm with fixed learning rate.

prox_v = var - alpha * delta - var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

matrixSolve Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Double, Float]` t) 
=> Tensor v1 t

matrix: Shape is `[..., M, M]`.

-> Tensor v2 t

rhs: Shape is `[..., M, K]`.

-> Tensor Value t

output: Shape is `[..., M, K]`.

Solves systems of linear equations.

Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. Rhs is a tensor of shape `[..., M, K]`. The output is a tensor of shape `[..., M, K]`. If adjoint is False then each output matrix satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If adjoint is True then each output matrix satisfies `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
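
For the adjoint=False case, the equivalent NumPy computation is (a sketch for checking results, not this package's API):

```python
import numpy as np

matrix = np.array([[3.0, 1.0],
                   [1.0, 2.0]])
rhs = np.array([[9.0],
                [8.0]])
output = np.linalg.solve(matrix, rhs)   # solves matrix @ output = rhs
assert np.allclose(matrix @ output, rhs)
```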

sparseApplyProximalAdagrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v3 t

lr: Learning rate. Must be a scalar.

-> Tensor v4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v6 t

grad: The gradient.

-> Tensor v7 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Build (Tensor Ref t)

out: Same as "var".

Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

That is for rows we have grad for, we update var and accum as follows:
accum += grad * grad
prox_v = var
prox_v -= lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

applyGradientDescent Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor v2 t

alpha: Scaling factor. Must be a scalar.

-> Tensor v3 t

delta: The change.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' by subtracting alpha * delta from it.

batchNormWithGlobalNormalization Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Bool

scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.

-> Float

variance_epsilon: A small float number to avoid dividing by 0.

-> Tensor v1 t

t: A 4D input Tensor.

-> Tensor v2 t

m: A 1D mean Tensor with size matching the last dimension of t. - This is the first output from tf.nn.moments, - or a saved moving average thereof.

-> Tensor v3 t

v: A 1D variance Tensor with size matching the last dimension of t. - This is the second output from tf.nn.moments, - or a saved moving average thereof.

-> Tensor v4 t

beta: A 1D beta Tensor with size matching the last dimension of t. - An offset to be added to the normalized tensor.

-> Tensor v5 t

gamma: A 1D gamma Tensor with size matching the last dimension of t. - If "scale_after_normalization" is true, this tensor will be multiplied - with the normalized tensor.

-> Tensor Value t

result

Batch normalization.

This op is deprecated. Prefer `tf.nn.batch_normalization`.

encodeBase64 Source

Arguments

:: Tensor v1 ByteString

input: Strings to be encoded.

-> Tensor Value ByteString

output: Input strings encoded in base64.

Encode strings into web-safe base64 format.

Refer to the following article for more information on the base64 format: en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the end so that the encoded data has a length that is a multiple of 4. See the Padding section of the link above.

Web-safe means that the encoder uses `-` and `_` instead of `+` and `/`.
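
Python's standard library exposes the same web-safe alphabet, which is handy for previewing the output format (a sketch; the op itself runs inside the graph):

```python
import base64

s = b"ladies + gentlemen?"
standard = base64.b64encode(s)          # may contain '+' and '/'
web_safe = base64.urlsafe_b64encode(s)  # uses '-' and '_' instead
```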

stringJoin Source

Arguments

:: [Tensor v1 ByteString]

inputs: A list of string tensors. The tensors must all have the same shape, - or be scalars. Scalars may be mixed in; these will be broadcast to the shape - of non-scalar inputs.

-> Tensor Value ByteString

output

Joins the strings in the given list of string tensors into one tensor, with the given separator (default is an empty separator).

cropAndResizeGradImage Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 Float

grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

-> Tensor v2 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor - specifies the coordinates of a box in the `box_ind[i]` image and is specified - in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of - y is mapped to the image coordinate at `y * (image_height - 1)`, so as the - `[0, 1]` interval of normalized image height is mapped to - `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in - which case the sampled crop is an up-down flipped version of the original - image. The width dimension is treated similarly. Normalized coordinates - outside the `[0, 1]` range are allowed, in which case we use - extrapolation_value to extrapolate the input image values.

-> Tensor v3 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor v4 Int32

image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` - containing the original image size. Both image_height and image_width need - to be positive.

-> Tensor Value t

output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.

Computes the gradient of the crop_and_resize op wrt the input image tensor.

tanh Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes hyperbolic tangent of x element-wise.

asString Source

Arguments

:: (TensorType t, OneOf `[Complex Float, Bool, Int32, Int64, Int8, Double, Float]` t) 
=> Tensor v1 t

input

-> Tensor Value ByteString

output

Converts each entry in the given tensor to strings. Supports many numeric types and boolean.

iFFT2D Source

Arguments

:: Tensor v1 (Complex Float)

input: A complex64 tensor.

-> Tensor Value (Complex Float)

output: A complex64 tensor of the same shape as input. The inner-most 2 - dimensions of input are replaced with their inverse 2D Fourier Transform.

Compatibility (numpy): equivalent to `np.ifft2`.

Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most 2 dimensions of input.

sparseConcat Source

Arguments

:: TensorType t 
=> Int64

concat_dim: Dimension to concatenate along. Must be in range [-rank, rank), - where rank is the number of dimensions in each input SparseTensor.

-> [Tensor v1 Int64]

indices: 2-D. Indices of each input SparseTensor.

-> [Tensor v2 t]

values: 1-D. Non-empty values of each SparseTensor.

-> [Tensor v3 Int64]

shapes: 1-D. Shapes of each SparseTensor.

-> (Tensor Value Int64, Tensor Value t, Tensor Value Int64)

(output_indices, output_values, output_shape)

  • output_indices: 2-D. Indices of the concatenated SparseTensor.
  • output_values: 1-D. Non-empty values of the concatenated SparseTensor.
  • output_shape: 1-D. Shape of the concatenated SparseTensor.

Concatenates a list of SparseTensor along the specified dimension.

Concatenation is with respect to the dense versions of these sparse tensors. It is assumed that each input is a SparseTensor whose elements are ordered along increasing dimension number.

All inputs' shapes must match, except for the concat dimension. The indices, values, and shapes lists must have the same length.

The output shape is identical to the inputs', except along the concat dimension, where it is the sum of the inputs' sizes along that dimension.

The output elements will be resorted to preserve the sort order along increasing dimension number.

This op runs in `O(M log M)` time, where M is the total number of non-empty values across all inputs. This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension.

For example, if `concat_dim = 1` and the inputs are

sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"

sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"

then the output will be

shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"

Graphically this is equivalent to doing

    [    a] concat [  d e  ] = [    a   d e  ]
    [b c  ]        [       ]   [b c          ]

shardedFilespec Source

Arguments

:: Tensor v1 ByteString

basename

-> Tensor v2 Int32

num_shards

-> Tensor Value ByteString

filename

Generate a glob pattern matching all sharded file names.

transpose Source

Arguments

:: (TensorType t, TensorType tperm, OneOf `[Int32, Int64]` tperm) 
=> Tensor v1 t

x

-> Tensor v2 tperm

perm

-> Tensor Value t

y

Shuffle dimensions of x according to a permutation.

The output y has the same rank as x. The shapes of x and y satisfy: `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
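
NumPy's transpose follows the same convention, so the shape relation can be checked directly:

```python
import numpy as np

x = np.zeros((2, 3, 4))
perm = [2, 0, 1]
y = np.transpose(x, perm)
assert y.shape == tuple(x.shape[p] for p in perm)  # (4, 2, 3)
```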

reduceJoin Source

Arguments

:: Tensor v1 ByteString

inputs: The input to be joined. All reduced indices must have non-zero size.

-> Tensor v2 Int32

reduction_indices: The dimensions to reduce over. Dimensions are reduced in the - order specified. Omitting reduction_indices is equivalent to passing - `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.

-> Tensor Value ByteString

output: Has shape equal to that of the input with reduced dimensions removed or - set to `1` depending on keep_dims.

Joins a string Tensor across the given dimensions.

Computes the string join across dimensions in the given string Tensor of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`. Passing an empty reduction_indices joins all strings in linear index order and outputs a scalar string.

For example:

```
# tensor a is [["a", "b"], ["c", "d"]]
tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
tf.reduce_join(a, [0, 1]) ==> ["acbd"]
tf.reduce_join(a, [1, 0]) ==> ["abcd"]
tf.reduce_join(a, []) ==> ["abcd"]
```

stringToHashBucket Source

Arguments

:: Int64

num_buckets: The number of buckets.

-> Tensor v1 ByteString

string_tensor

-> Tensor Value Int64

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to its hash mod by a number of buckets.

The hash function is deterministic on the content of the string within the process.

stringSplit'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

input: 1-D. Strings to split.

-> Tensor v'2 ByteString

delimiter: 0-D. Delimiter characters (bytes), or empty string.

-> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64)

(indices, values, shape)

  • indices: A dense matrix of int64 representing the indices of the sparse tensor.
  • values: A vector of strings corresponding to the split values.
  • shape: a length-2 vector of int64 representing the shape of the sparse tensor, where the first value is N and the second value is the maximum number of tokens in a single input entry.

stringToHashBucket

Arguments

:: Int64

num_buckets: The number of buckets.

-> Tensor v'1 ByteString

string_tensor

-> Tensor Build Int64

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to its hash mod by a number of buckets.

The hash function is deterministic on the content of the string within the process.

Note that the hash function may change from time to time. This functionality will be deprecated and it's recommended to use `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
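
For intuition, a hash-mod-buckets scheme looks like the sketch below. The digest used here is an illustrative stand-in only; it is not the (unspecified, changeable) hash function this op uses:

```python
import hashlib

def string_to_hash_bucket(s, num_buckets):
    # Stable stand-in digest, reduced modulo the bucket count.
    digest = hashlib.sha256(s.encode("utf-8")).digest()
    return int.from_bytes(digest[:8], "little") % num_buckets

bucket = string_to_hash_bucket("hello", num_buckets=10)  # always in [0, 10)
```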

multinomial Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` - represents the unnormalized log probabilities for all classes.

-> Tensor v2 Int32

num_samples: 0-D. Number of independent samples to draw for each row slice.

-> Build (Tensor Value Int64)

output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` - contains the drawn class labels with range `[0, num_classes)`.

Draws samples from a multinomial distribution.

stringToHashBucketStrong Source

Arguments

:: Int64

num_buckets: The number of buckets.

-> Tensor v1 ByteString

input: The strings to assign a hash bucket.

-> Tensor Value Int64

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to its hash mod by a number of buckets.

The hash function is deterministic on the content of the string within the process.

stringToHashBucket'

Arguments

:: OpParams 
-> Int64

num_buckets: The number of buckets.

-> Tensor v'1 ByteString

string_tensor

-> Tensor Build Int64

output: A Tensor of the same shape as the input string_tensor.

stringToHashBucketFast

Arguments

:: Int64

num_buckets: The number of buckets.

-> Tensor v'1 ByteString

input: The strings to assign a hash bucket.

-> Tensor Build Int64

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to its hash mod by a number of buckets.

The hash function is deterministic on the content of the string within the process and will never change. However, it is not suitable for cryptography. This function may be used when CPU time is scarce and inputs are trusted or unimportant. There is a risk of adversaries constructing inputs that all hash to the same bucket. To prevent this problem, use a strong hash function with `tf.string_to_hash_bucket_strong`.

stringToHashBucketFast'

Arguments

:: OpParams 
-> Int64

num_buckets: The number of buckets.

-> Tensor v'1 ByteString

input: The strings to assign a hash bucket.

-> Tensor Build Int64

output: A Tensor of the same shape as the input string_tensor.

stringToHashBucketStrong

Arguments

:: Int64

num_buckets: The number of buckets.

-> Tensor v'1 ByteString

input: The strings to assign a hash bucket.

-> Tensor Build Int64

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to its hash mod by a number of buckets.

The hash function is deterministic on the content of the string within the process. The hash function is a keyed hash function, where attribute key defines the key of the hash function. key is an array of 2 elements.

A strong hash is important when inputs may be malicious, e.g. URLs with additional components. Adversaries could try to make their inputs hash to the same bucket for a denial-of-service attack or to skew the results. A strong hash prevents this by making it difficult, if not infeasible, to compute inputs that hash to the same bucket. This comes at a cost of roughly 4x higher compute time than `tf.string_to_hash_bucket_fast`.

scatterNdUpdate Source

Arguments

:: (TensorType t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. - A tensor of indices into ref.

-> Tensor v3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated - values to add to ref.

-> Build (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to - use the updated values after the update is done.

Applies sparse updates to individual values or slices within a given variable according to indices.

ref is a Tensor with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into ref. It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of ref.

updates is a Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```

For example, say we want to update 4 scattered elements of a rank-1 tensor with 8 elements. In Python, that update would look like this:

ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
    print sess.run(update)

The resulting update to ref would look like this:

[1, 11, 3, 10, 9, 6, 7, 12]

See tf.scatter_nd for more details about how to make updates to slices.

fakeQuantWithMinMaxVarsGradient Source

Arguments

:: Tensor v1 Float

gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.

-> Tensor v2 Float

inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
min, max: Quantization interval, scalar floats.

-> Tensor v3 Float

min

-> Tensor v4 Float

max

-> (Tensor Value Float, Tensor Value Float, Tensor Value Float)

(backprops_wrt_input, backprop_wrt_min, backprop_wrt_max)

  • backprops_wrt_input: Backpropagated gradients w.r.t. inputs: `gradients * (inputs >= min && inputs <= max)`.
  • backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: `sum(gradients * (inputs < min))`.
  • backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: `sum(gradients * (inputs > max))`.

Compute gradients for a FakeQuantWithMinMaxVars operation.

size Source

Arguments

:: (TensorType t, TensorType out_type, OneOf `[Int32, Int64]` out_type) 
=> Tensor v1 t

input

-> Tensor Value out_type

output

Returns the size of a tensor.

This operation returns an integer representing the number of elements in - input.

For example:

```prettyprint
# t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```

scatterDiv Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v3 t

updates: A tensor of values that ref is divided by.

-> Build (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

Divides a variable reference by sparse updates.

This operation computes

# Scalar indices
ref[indices, ...] /= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] /= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions divide.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

scatterMul Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v3 t

updates: A tensor of updated values to multiply to ref.

-> Build (Tensor Ref t)

output_ref: = Same as ref. Returned as a convenience for operations that want - to use the updated values after the update is done.

Multiplies sparse updates into a variable reference.

This operation computes

# Scalar indices
ref[indices, ...] *= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] *= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions multiply.

Requires `updates.shape = indices.shape + ref.shape[1:]`.
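
A NumPy sketch of the duplicate-index behavior (each occurrence of an index contributes its own factor):

```python
import numpy as np

ref = np.array([2.0, 4.0, 8.0, 16.0])
indices = [1, 3, 1]        # index 1 appears twice: both factors apply
updates = [2.0, 4.0, 2.0]
for i, idx in enumerate(indices):
    ref[idx] *= updates[i]
# ref is now [2., 16., 8., 64.]
```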

copyHost Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: Input tensor.

-> Tensor Value t

output: Output tensor, deep-copied from input.

Copy Host Op.

Performs CPU-to-CPU deep-copying of tensor.

Unlike the Copy Op, this op has HostMemory constraint on its input or output.

wholeFileReader Source

Arguments

:: Build (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the entire contents of a file as a value.

To use, enqueue filenames in a Queue. The output of ReaderRead will - be a filename (key) and the contents of that file (value).

takeManySparseFromTensorsMap Source

Arguments

:: TensorType dtype 
=> Tensor v1 Int64

sparse_handles: 1-D, The N serialized SparseTensor objects. - Shape: `[N]`.

-> Build (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

(sparse_indices, sparse_values, sparse_shape)

  • sparse_indices: 2-D. The indices of the minibatch SparseTensor.
  • sparse_values: 1-D. The values of the minibatch SparseTensor.
  • sparse_shape: 1-D. The shape of the minibatch SparseTensor.

Read SparseTensors from a SparseTensorsMap and concatenate them.

The input sparse_handles must be an int64 matrix of shape `[N, 1]` where N is the minibatch size and the rows correspond to the output handles of AddSparseToTensorsMap or AddManySparseToTensorsMap. The ranks of the original SparseTensor objects that went into the given input ops must all match. When the final SparseTensor is created, it has rank one higher than the ranks of the incoming SparseTensor objects (they have been concatenated along a new row dimension on the left).

The output SparseTensor object's shape values for all dimensions but the first are the max across the input SparseTensor objects' shape values for the corresponding dimensions. Its first shape value is N, the minibatch size.

The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run SparseReorder to restore index ordering.

For example, if the handles represent an input, which is a `[2, 3]` matrix representing two original SparseTensor objects:

```
index = [ 0]
        [10]
        [20]
values = [1, 2, 3]
shape = [50]
```

and

```
index = [ 2]
        [10]
values = [4, 5]
shape = [30]
```

then the final SparseTensor will be:

```
index = [0  0]
        [0 10]
        [0 20]
        [1  2]
        [1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
```

destroyTemporaryVariable Source

Arguments

:: TensorType t 
=> Tensor Ref t

ref: A reference to the temporary variable tensor.

-> Build (Tensor Value t)

value

Destroys the temporary variable and returns its final value.

Sets output to the value of the Tensor pointed to by ref, then destroys the temporary variable called var_name. All other uses of ref *must* have executed before this op. This is typically achieved by chaining the ref through each assign op, or by using control dependencies.

Outputs the final value of the tensor pointed to by ref.

assignSub Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v2 t

value: The value to be subtracted to the variable.

-> Build (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want - to use the new value after the variable has been updated.

Update ref by subtracting value from it.

This operation outputs "ref" after the update is done. - This makes it easier to chain operations that need to use the reset value.

encodeJpeg Source

Arguments

:: Tensor v1 Word8

image: 3-D with shape `[height, width, channels]`.

-> Tensor Value ByteString

contents: 0-D. JPEG-encoded image.

JPEG-encode an image.

image is a 3-D uint8 Tensor of shape `[height, width, channels]`.

The attr format can be used to override the color format of the encoded - output. Values can be:

  • `''`: Use a default format based on the number of channels in the image.
  • grayscale: Output a grayscale JPEG image. The channels dimension - of image must be 1.
  • rgb: Output an RGB JPEG image. The channels dimension - of image must be 3.

If format is not specified or is the empty string, a default format is picked - in function of the number of channels in image:

  • 1: Output a grayscale image.
  • 3: Output an RGB image.

temporaryVariable Source

Arguments

:: TensorType dtype 
=> Shape

shape: The shape of the variable tensor.

-> Build (Tensor Ref dtype)

ref: A reference to the variable tensor.

Returns a tensor that may be mutated, but only persists within a single step.

This is an experimental op for internal use only and it is possible to use this - op in unsafe ways. DO NOT USE unless you fully understand the risks.

It is the caller's responsibility to ensure that ref is eventually passed to a - matching DestroyTemporaryVariable op after all other uses have completed.

Outputs a ref to the tensor state so it may be read or modified.

E.g.

var = state_ops._temporary_variable([1, 2], types.float_)
var_name = var.op.name
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = state_ops._destroy_temporary_variable(var, var_name=var_name)

isVariableInitialized Source

Arguments

:: TensorType dtype 
=> Tensor Ref dtype

ref: Should be from a Variable node. May be uninitialized.

-> Build (Tensor Value Bool)

is_initialized

Checks whether a tensor has been initialized.

Outputs boolean scalar indicating whether the tensor has been initialized.

variable Source

Arguments

:: TensorType dtype 
=> Shape

shape: The shape of the variable tensor.

-> Build (Tensor Ref dtype)

ref: A reference to the variable tensor.

Holds state in the form of a tensor that persists across steps.

Outputs a ref to the tensor state so it may be read or modified. TODO(zhifengc/mrry): Adds a pointer to a more detailed document about sharing states in tensorflow.

sparseSparseMinimum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int64

a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, in the canonical lexicographic ordering.

-> Tensor v2 t

a_values: 1-D. N non-empty values corresponding to a_indices.

-> Tensor v3 Int64

a_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v4 Int64

b_indices: counterpart to a_indices for the other operand.

-> Tensor v5 t

b_values: counterpart to a_values for the other operand; must be of the same dtype.

-> Tensor v6 Int64

b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

-> (Tensor Value Int64, Tensor Value t)

(output_indices, output_values)

  • output_indices: 2-D. The indices of the output SparseTensor.
  • output_values: 1-D. The values of the output SparseTensor.

Returns the element-wise min of two SparseTensors.

Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

betainc Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

a

-> Tensor v2 t

b

-> Tensor v3 t

x

-> Tensor Value t

z

Compute the regularized incomplete beta integral \(I_x(a, b)\).

The regularized incomplete beta integral is defined as:

```
I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}
```

where

```
B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt
```

is the incomplete beta function and \(B(a, b)\) is the *complete* beta function.
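
SciPy implements the same regularized function, which is convenient for spot-checking outputs (an equivalence under the definitions above, not part of this package):

```python
from scipy.special import betainc

z = betainc(2.0, 3.0, 0.5)  # I_x(a, b) with a=2, b=3, x=0.5; equals 0.6875
```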

assign Source

Arguments

:: TensorType t 
=> Tensor Ref t

ref: Should be from a Variable node. May be uninitialized.

-> Tensor v2 t

value: The value to be assigned to the variable.

-> Build (Tensor Ref t)

output_ref: = Same as "ref". Returned as a convenience for operations that want - to use the new value after the variable has been reset.

Update ref by assigning value to it.

This operation outputs "ref" after the assignment is done. - This makes it easier to chain operations that need to use the reset value.

sparseSoftmax Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 Int64

sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a - SparseTensor, in canonical ordering.

-> Tensor v2 t

sp_values: 1-D. NNZ non-empty values corresponding to sp_indices.

-> Tensor v3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor Value t

output: 1-D. The NNZ values for the result SparseTensor.

Applies softmax to a batched N-D SparseTensor.

The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` (where `N >= 2`), and with indices sorted in the canonical lexicographic order.

This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost logical submatrix with shape `[B, C]`, but with the catch that *the implicitly zero elements do not participate*. Specifically, the algorithm is equivalent to the following:

  1. Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension;
  2. Masks out the original implicitly-zero locations;
  3. Renormalizes the remaining elements.

Hence, the SparseTensor result has exactly the same non-zero indices and shape.

sparseDenseCwiseAdd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

-> Tensor v2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v4 t

dense: R-D. The dense Tensor operand.

-> Tensor Value t

output: 1-D. The N values that are operated on.

Adds up a SparseTensor and a dense Tensor, using these special rules:

  1. Broadcasts the dense side to have the same shape as the sparse side, if eligible;
  2. Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.

By these rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values.

logicalNot Source

Arguments

:: Tensor v1 Bool

x

-> Tensor Value Bool

y

Returns the truth value of NOT x element-wise.

queueSize Source

Arguments

:: Tensor Ref ByteString

handle: The handle to a queue.

-> Build (Tensor Value Int32)

size: The number of elements in the given queue.

Computes the number of elements in the given queue.

sparseApplyAdagrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v3 t

lr: Learning rate. Must be a scalar.

-> Tensor v4 t

grad: The gradient.

-> Tensor v5 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Build (Tensor Ref t)

out: Same as "var".

Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

That is for rows we have grad for, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))

getSessionHandle Source

Arguments

:: TensorType t 
=> Tensor v1 t

value: The tensor to be stored.

-> Tensor Value ByteString

handle: The handle for the tensor stored in the session state.

Store the input tensor in the state of the current session.

sparseDenseCwiseMul Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

-> Tensor v2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v4 t

dense: R-D. The dense Tensor operand.

-> Tensor Value t

output: 1-D. The N values that are operated on.

Component-wise multiplies a SparseTensor by a dense Tensor.

The output locations corresponding to the implicitly zero elements in the sparse tensor will be zero (i.e., will not take up storage space), regardless of the contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).

*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.

sparseTensorDenseAdd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 tindices

a_indices: 2-D. The indices of the SparseTensor, with shape `[nnz, ndims]`.

-> Tensor v2 t

a_values: 1-D. The values of the SparseTensor, with shape `[nnz]`.

-> Tensor v3 tindices

a_shape: 1-D. The shape of the SparseTensor, with shape `[ndims]`.

-> Tensor v4 t

b: ndims-D Tensor. With shape a_shape.

-> Tensor Value t

output

Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.

This Op does not require a_indices be sorted in standard lexicographic order.

getSessionTensor Source

Arguments

:: TensorType dtype 
=> Tensor v1 ByteString

handle: The handle for a tensor stored in the session state.

-> Tensor Value dtype

value: The tensor for the given handle.

Get the value of the tensor specified by its handle.

sparseReorder Source

Arguments

:: TensorType t 
=> Tensor v1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - SparseTensor, possibly not in canonical ordering.

-> Tensor v2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> (Tensor Value Int64, Tensor Value t)

(output_indices, output_values)

  • output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but - in canonical row-major ordering.
  • output_values: 1-D. N non-empty values corresponding to output_indices.

Reorders a SparseTensor into the canonical, row-major ordering.

Note that by convention, all sparse ops preserve the canonical ordering along - increasing dimension number. The only time ordering can be violated is during - manual manipulation of the indices and values vectors to add entries.

Reordering does not affect the shape of the SparseTensor.

If the tensor has rank R and N non-empty values, input_indices has - shape `[N, R]`, input_values has length N, and input_shape has length R.

sparseSplit Source

Arguments

:: TensorType t 
=> Int64

num_split: The number of ways to split.

-> Tensor v1 Int64

split_dim: 0-D. The dimension along which to split. Must be in the range - `[0, rank(shape))`.

-> Tensor v2 Int64

indices: 2-D tensor represents the indices of the sparse tensor.

-> Tensor v3 t

values: 1-D tensor represents the values of the sparse tensor.

-> Tensor v4 Int64

shape: 1-D tensor representing the shape of the sparse tensor.

-> ([Tensor Value Int64], [Tensor Value t], [Tensor Value Int64])

(output_indices, output_values, output_shape)

  • output_indices: A list of 1-D tensors representing the indices of the output sparse tensors.
  • output_values: A list of 1-D tensors representing the values of the output sparse tensors.
  • output_shape: A list of 1-D tensors representing the shape of the output sparse tensors.

Split a SparseTensor into num_split tensors along one dimension.

If `shape[split_dim]` is not an integer multiple of num_split, slices `[0 : shape[split_dim] % num_split]` get one extra dimension. For example, if `split_dim = 1` and `num_split = 2` and the input is

input_tensor = shape = [2, 7]
[    a   d e  ]
[b c          ]

Graphically the output tensors are:

output_tensor[0] = shape = [2, 4]
[    a  ]
[b c    ]

output_tensor[1] = shape = [2, 3]
[ d e  ]
[      ]

pad Source

Arguments

:: (TensorType t, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
=> Tensor v1 t

input

-> Tensor v2 tpaddings

paddings

-> Tensor Value t

output

Pads a tensor with zeros.

This operation pads input with zeros according to the paddings you specify. paddings is an integer tensor with shape `[Dn, 2]`, where n is the rank of input. For each dimension D of input, `paddings[D, 0]` indicates how many zeros to add before the contents of input in that dimension, and `paddings[D, 1]` indicates how many zeros to add after the contents of input in that dimension.

The padded size of each dimension D of the output is:

`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

For example:

```prettyprint
# t is [[1, 1], [2, 2]]
# paddings is [[1, 1], [2, 2]]
# rank of t is 2
pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                      [0, 0, 1, 1, 0, 0]
                      [0, 0, 2, 2, 0, 0]
                      [0, 0, 0, 0, 0, 0]]
```

sparseToDense Source

Arguments

:: (TensorType t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 tindices

sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete - index where `sparse_values[i]` will be placed.

-> Tensor v2 tindices

output_shape: 1-D. Shape of the dense output tensor.

-> Tensor v3 t

sparse_values: 1-D. Values corresponding to each row of sparse_indices, - or a scalar value to be used for all sparse indices.

-> Tensor v4 t

default_value: Scalar value to set for indices not specified in - sparse_indices.

-> Tensor Value t

dense: Dense output tensor of shape output_shape.

Converts a sparse representation into a dense tensor.

Builds an array dense with shape output_shape such that

```prettyprint
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```

All other values in dense are set to default_value. If sparse_values is a scalar, all sparse indices are set to this single value.

Indices should be sorted in lexicographic order, and indices must not contain any repeats. If validate_indices is true, these properties are checked during execution.
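
A NumPy sketch of the n-by-d indices case (hypothetical values; the validate_indices checks are omitted):

```python
import numpy as np

sparse_indices = np.array([[0, 1], [2, 3]])  # n x d matrix of complete indices
output_shape = (3, 4)
sparse_values = np.array([7, 9])
default_value = 0

dense = np.full(output_shape, default_value)
dense[tuple(sparse_indices.T)] = sparse_values  # dense[0,1] = 7, dense[2,3] = 9
```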

sparseTensorDenseMatMul Source

Arguments

:: TensorType t 
=> Tensor v1 Int64

a_indices: 2-D. The indices of the SparseTensor, size `[nnz, 2]` Matrix.

-> Tensor v2 t

a_values: 1-D. The values of the SparseTensor, size `[nnz]` Vector.

-> Tensor v3 Int64

a_shape: 1-D. The shape of the SparseTensor, size `[2]` Vector.

-> Tensor v4 t

b: 2-D. A dense Matrix.

-> Tensor Value t

product

Multiply SparseTensor (of rank 2) A by dense matrix B.

No validity checking is performed on the indices of A. However, the following input format is recommended for optimal behavior:

if adjoint_a == false:
  A should be sorted in lexicographically increasing order. Use SparseReorder if you're not sure.
if adjoint_a == true:
  A should be sorted in order of increasing dimension 1 (i.e., "column major" order instead of "row major" order).

mirrorPadGrad Source

Arguments

:: (TensorType t, TensorType tpaddings, OneOf `[Int32, Int64]` tpaddings) 
=> Tensor v1 t

input: The input tensor to be folded.

-> Tensor v2 tpaddings

paddings: A two-column matrix specifying the padding sizes. The number of - rows must be the same as the rank of input.

-> Tensor Value t

output: The folded tensor.

Gradient op for MirrorPad op. This op folds a mirror-padded tensor.

This operation folds the padded areas of input by MirrorPad according to the paddings you specify. paddings must be the same as the paddings argument given to the corresponding MirrorPad op.

The folded size of each dimension D of the output is:

`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`

For example:

```prettyprint
# t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# paddings is [[0, 1], [0, 1]].
# mode is SYMMETRIC.
# rank of t is 2.
pad(t, paddings) ==> [[ 1,  5]
                      [11, 28]]
```

randomShuffle Source

Arguments

:: TensorType t 
=> Tensor v1 t

value: The tensor to be shuffled.

-> Build (Tensor Value t)

output: A tensor of same shape and type as value, shuffled along its first - dimension.

Randomly shuffles a tensor along its first dimension.

The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is:

```prettyprint
[[1, 2],       [[5, 6],
 [3, 4],  ==>   [1, 2],
 [5, 6]]        [3, 4]]
```

select Source

Arguments

:: TensorType t 
=> Tensor v1 Bool

condition

-> Tensor v2 t

t: A Tensor which may have the same shape as condition. If condition is rank 1, t may have higher rank, but its first dimension must match the size of condition.

-> Tensor v3 t

e: = A Tensor with the same type and shape as t.

-> Tensor Value t

output: = A Tensor with the same type and shape as t and e.

Selects elements from t or e, depending on condition.

The t and e tensors must all have the same shape, and the output will also have that shape.

The condition tensor must be a scalar if t and e are scalars. If t and e are vectors or higher rank, then condition must be either a scalar, a vector with size matching the first dimension of t, or must have the same shape as t.

The condition tensor acts as a mask that chooses, based on the value at each element, whether the corresponding element / row in the output should be taken from t (if true) or e (if false).

If condition is a vector and t and e are higher rank matrices, then it chooses which row (outer dimension) to copy from t and e. If condition has the same shape as t and e, then it chooses which element to copy from t and e.

For example:

```prettyprint
# condition tensor is [[True,  False]
#                      [False, True]]
# t is [[1, 2],
#       [3, 4]]
# e is [[5, 6],
#       [7, 8]]
select(condition, t, e) ==> [[1, 6],
                             [7, 4]]

# condition tensor is [True, False]
# t is [[1, 2],
#       [3, 4]]
# e is [[5, 6],
#       [7, 8]]
select(condition, t, e) ==> [[1, 2],
                             [7, 8]]
```

sparseAddGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to - the non-empty values of the sum.

-> Tensor v2 Int64

a_indices: 2-D. The indices of the SparseTensor A, size `[nnz(A), ndims]`.

-> Tensor v3 Int64

b_indices: 2-D. The indices of the SparseTensor B, size `[nnz(B), ndims]`.

-> Tensor v4 Int64

sum_indices: 2-D. The indices of the sum SparseTensor, size - `[nnz(sum), ndims]`.

-> (Tensor Value t, Tensor Value t)

(a_val_grad, b_val_grad)

  • a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the - non-empty values of A.
  • b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the - non-empty values of B.

The gradient operator for the SparseAdd op.

The SparseAdd op calculates A + B, where A, B, and the sum are all represented - as SparseTensor objects. This op takes in the upstream gradient w.r.t. - non-empty values of the sum, and outputs the gradients w.r.t. the non-empty - values of A and B.

sdcaFprint Source

Arguments

:: Tensor v1 ByteString

input: vector of strings to compute fingerprints on.

-> Tensor Value Int64

output: a (N,2) shaped matrix where N is the number of elements in the input - vector. Each row contains the low and high parts of the fingerprint.

Computes fingerprints of the input strings.

tensorArrayUnpack Source

Arguments

:: TensorType t 
=> Tensor Ref ByteString

handle

-> Tensor v2 t

value

-> Tensor v3 Float

flow_in

-> Build (Tensor Value Float)

flow_out

quantizedAvgPool Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Word16, Word8]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 Float

min_input: The float value that the lowest quantized input value represents.

-> Tensor v3 Float

max_input: The float value that the highest quantized input value represents.

-> (Tensor Value t, Tensor Value Float, Tensor Value Float)

(output, min_output, max_output)

  • output
  • min_output: The float value that the lowest quantized output value represents.
  • max_output: The float value that the highest quantized output value represents.

Produces the average pool of the input tensor for quantized types.

adjustContrastv2 Source

Arguments

:: Tensor v1 Float

images: Images to adjust. At least 3-D.

-> Tensor v2 Float

contrast_factor: A float multiplier for adjusting contrast.

-> Tensor Value Float

output: The contrast-adjusted image or images.

Adjust the contrast of one or more images.

images is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels]`.

Contrast is adjusted independently for each channel of each image.

For each channel, the Op first computes the mean of the image pixels in the channel and then adjusts each component of each pixel to `(x - mean) * contrast_factor + mean`.
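
In NumPy terms the adjustment is the following sketch, assuming a `[..., height, width, channels]` layout:

```python
import numpy as np

def adjust_contrast(images, contrast_factor):
    # Mean over the spatial dimensions, independently per channel.
    mean = images.mean(axis=(-3, -2), keepdims=True)
    return (images - mean) * contrast_factor + mean
```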

resourceGather Source

Arguments

:: (TensorType dtype, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle dtype

resource

-> Tensor v2 tindices

indices

-> Build (Tensor Value dtype)

output

Gather slices from the variable pointed to by resource according to indices.

indices must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

```python
# Scalar indices
output[:, ..., :] = params[indices, :, ... :]

# Vector indices
output[i, :, ..., :] = params[indices[i], :, ... :]

# Higher rank indices
output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
```

mergeSummary Source

Arguments

:: [Tensor v1 ByteString]

inputs: Can be of any shape. Each must contain serialized Summary protocol - buffers.

-> Tensor Value ByteString

summary: Scalar. Serialized Summary protocol buffer.

Merges summaries.

This op creates a - `Summary` - protocol buffer that contains the union of all the values in the input - summaries.

When the Op is run, it reports an InvalidArgument error if multiple values - in the summaries to merge use the same tag.

serializeSparse Source

Arguments

:: TensorType t 
=> Tensor v1 Int64

sparse_indices: 2-D. The indices of the SparseTensor.

-> Tensor v2 t

sparse_values: 1-D. The values of the SparseTensor.

-> Tensor v3 Int64

sparse_shape: 1-D. The shape of the SparseTensor.

-> Tensor Value ByteString

serialized_sparse

Serialize a SparseTensor into a string 3-vector (1-D Tensor) object.

negTrain Source

Arguments

:: Int64

num_negative_samples: Number of negative samples per example.

-> Tensor Ref Float

w_in: input word embedding.

-> Tensor Ref Float

w_out: output word embedding.

-> Tensor v3 Int32

examples: A vector of word ids.

-> Tensor v4 Int32

labels: A vector of word ids.

-> Tensor v5 Float

lr

-> Build ControlNode 

Training via negative sampling.

tensorArrayCloseV2 Source

Arguments

:: Tensor v1 ByteString

handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

-> ControlNode 

Delete the TensorArray from its resource container. This enables the user to close and release the resource in the middle of a step/run.

threadUnsafeUnigramCandidateSampler Source

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled - candidates in a batch are unique. This requires some approximation to - estimate the post-rejection sampling probabilities.

-> Tensor v1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is - the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing - the number of times each candidate is expected to occur in a batch - of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled - candidate representing the number of times the candidate is expected - to occur in a batch of sampled candidates. If unique=true, then this is a - probability.

Generates labels for candidate sampling with a learned unigram distribution.

See explanations of candidate sampling and the data formats at - go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the - possibility of efficient dense matrix multiplication. The disadvantage is that - the sampled candidates must be chosen independently of the context and of the - true labels.

stringToNumber Source

Arguments

:: (TensorType out_type, OneOf `[Int32, Float]` out_type) 
=> Tensor v1 ByteString

string_tensor

-> Tensor Value out_type

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to the specified numeric type.

(Note that int32 overflow results in an error while float overflow - results in a rounded value.)

cTCBeamSearchDecoder Source

Arguments

:: Int64

beam_width: A scalar >= 0 (beam search beam width).

-> Int64

top_paths: A scalar >= 0, <= beam_width (controls output size).

-> Tensor v1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v2 Int32

sequence_length: A vector containing sequence lengths, size `(batch)`.

-> ([Tensor Value Int64], [Tensor Value Int64], [Tensor Value Int64], Tensor Value Float)

(decoded_indices, decoded_values, decoded_shape, log_probability)

  • decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, - size `(total_decoded_outputs[j] x 2)`, has indices of a - `SparseTensor2`. The rows store: [batch, time].
  • decoded_values: A list (length: top_paths) of values vectors. Vector j, - size `(length total_decoded_outputs[j])`, has the values of a - `SparseTensor2`. The vector stores the decoded classes for beam j.
  • decoded_shape: A list (length: top_paths) of shape vector. Vector j, - size `(2)`, stores the shape of the decoded `SparseTensor[j]`. - Its values are: `[batch_size, max_decoded_length[j]]`.
  • log_probability: A matrix, shaped: `(batch_size x top_paths)`. The - sequence log-probabilities.

Performs beam search decoding on the logits given in input.

A note about the attribute merge_repeated: For the beam search decoder, - this means that if consecutive entries in a beam are the same, only - the first of these is emitted. That is, when the top path is "A B B B B", - "A B" is returned if merge_repeated = True but "A B B B B" is - returned if merge_repeated = False.

parseTensor Source

Arguments

:: TensorType out_type 
=> Tensor v1 ByteString

serialized: A scalar string containing a serialized TensorProto proto.

-> Tensor Value out_type

output: A Tensor of type out_type.

Transforms a serialized tensorflow.TensorProto proto into a Tensor.

imageSummary Source

Arguments

:: (TensorType t, OneOf `[Word16, Word8, Float]` t) 
=> Tensor v1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v2 t

tensor: 4-D of shape `[batch_size, height, width, channels]` where - channels is 1, 3, or 4.

-> Tensor Value ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with images.

The summary has up to max_images summary values containing images. The - images are built from tensor which must be 4-D with shape `[batch_size, - height, width, channels]` and where channels can be:

  • 1: tensor is interpreted as Grayscale.
  • 3: tensor is interpreted as RGB.
  • 4: tensor is interpreted as RGBA.

The images have the same number of channels as the input tensor. For float - input, the values are normalized one image at a time to fit in the range - `[0, 255]`. uint8 values are unchanged. The op uses two different - normalization algorithms:

  • If the input values are all positive, they are rescaled so the largest one - is 255.
  • If any input value is negative, the values are shifted so input value 0.0 - is at 127. They are then rescaled so that either the smallest value is 0, - or the largest one is 255.

The tag argument is a scalar Tensor of type string. It is used to - build the tag of the summary values:

  • If max_images is 1, the summary value tag is '*tag*/image'.
  • If max_images is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.

The bad_color argument is the color to use in the generated images for non-finite input values. It is a uint8 1-D tensor of length channels. Each element must be in the range `[0, 255]` (it represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color red.

truncateDiv Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns x / y element-wise for integer types.

Truncation designates that negative numbers will round fractional quantities toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different than Python semantics. See FloorDiv for a division function that matches Python semantics.
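
The difference is easy to reproduce in plain Python:

```python
import math

x, y = -7, 5
trunc = int(x / y)           # -1: rounds toward zero (C semantics, as here)
floored = math.floor(x / y)  # -2: Python's // semantics (see FloorDiv)
```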

*NOTE*: TruncateDiv supports broadcasting. More about broadcasting here.

cholesky Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input: Shape is `[..., M, M]`.

-> Tensor Value t

output: Shape is `[..., M, M]`.

Computes the Cholesky decomposition of one or more square matrices.

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions - form square matrices, with the same constraints as the single matrix Cholesky - decomposition above. The output is a tensor of the same shape as the input - containing the Cholesky decompositions for all input submatrices `[..., :, :]`.

batchMatrixSolveLs Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

matrix

-> Tensor v2 t

rhs

-> Tensor v3 Double

l2_regularizer

-> Tensor Value t

output

lookupTableExport Source

Arguments

:: (TensorType tkeys, TensorType tvalues) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> Build (Tensor Value tkeys, Tensor Value tvalues)

(keys, values)

  • keys: Vector of all keys present in the table.
  • values: Tensor of all values in the table. Indexed in parallel with keys.

Outputs all keys and values in the table.

batchSvd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Double, Float]` t) 
=> Tensor v1 t

input

-> (Tensor Value t, Tensor Value t, Tensor Value t)

(s, u, v)

  • s
  • u
  • v

resizeBicubic Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 Int32

size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - new size for the images.

-> Tensor Value Float

resized_images: 4-D with shape - `[batch, new_height, new_width, channels]`.

Resize images to size using bicubic interpolation.

Input images can be of different types but output images are always float.

hSVToRGB Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.

-> Tensor Value t

output: images converted to RGB.

Convert one or more images from HSV to RGB.

Outputs a tensor of the same shape as the images tensor, containing the RGB value of the pixels. The output is only well defined if the values in images are in `[0,1]`.

See rgb_to_hsv for a description of the HSV encoding.

avgPool3D Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.

-> Tensor Value t

output: The average pooled output tensor.

Performs 3D average pooling on the input.

stackClose Source

Arguments

:: Tensor Ref ByteString

handle: The handle to a stack.

-> Build ControlNode 

Delete the stack from its resource container.

assignVariableOp Source

Arguments

:: TensorType dtype 
=> ResourceHandle dtype

resource: handle to the resource in which to store the variable.

-> Tensor v2 dtype

value: the value to set the new tensor to use.

-> Build ControlNode 

Assigns a new value to a variable.

Any ReadVariableOp with a control dependency on this op is guaranteed to return this value or a subsequent newer value of the variable.

lRN Source

Arguments

:: (TensorType t, OneOf `[Word16, Float]` t) 
=> Tensor v1 t

input: 4-D.

-> Tensor Value t

output

Local Response Normalization.

The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius. In detail,

sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
output = input / (bias + alpha * sqr_sum) ** beta

For details, see Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012).
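
As a rough illustration of the formula above (not the kernel's implementation), here is a 1-D version in plain Haskell; the depth_radius, bias, alpha and beta values are made up for the example:

```haskell
-- A minimal sketch of the LRN formula: each component is divided by the
-- weighted, squared sum of its neighbours within depthRadius.
lrn :: Int -> Double -> Double -> Double -> [Double] -> [Double]
lrn depthRadius bias alpha beta xs =
    [ x / (bias + alpha * sqrSum d) ** beta | (d, x) <- zip [0 ..] xs ]
  where
    n = length xs
    sqrSum d = sum [ (xs !! i) ^ (2 :: Int)
                   | i <- [max 0 (d - depthRadius) .. min (n - 1) (d + depthRadius)] ]

main :: IO ()
main = print (lrn 2 1 1e-3 0.75 [1, 2, 3, 4])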

zeta Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

q

-> Tensor Value t

z

Compute the Hurwitz zeta function \(\zeta(x, q)\).

The Hurwitz zeta function is defined as:

```
zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x}
```
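
A partial sum of this series is easy to check in plain Haskell; this only illustrates the definition, not the algorithm the op uses:

```haskell
-- Partial-sum approximation of the Hurwitz zeta series above.
hurwitzZeta :: Int -> Double -> Double -> Double
hurwitzZeta terms x q =
    sum [ (q + fromIntegral n) ** negate x | n <- [0 .. terms - 1] ]

main :: IO ()
main = print (hurwitzZeta 100000 2 1)  -- ~ pi^2 / 6 for x = 2, q = 1
```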

tensorArrayGradV2 Source

Arguments

:: Tensor v1 ByteString

handle: The handle to the forward TensorArray.

-> Tensor v2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> Build (Tensor Value ByteString)

grad_handle

Creates a TensorArray for storing the gradients of values in the given handle.

If the given TensorArray gradient already exists, returns a reference to it.

Locks the size of the original TensorArray by disabling its dynamic size flag.

  • **A note about the input flow_in:**

The handle flow_in forces the execution of the gradient lookup to occur only after certain other operations have occurred. For example, when the forward TensorArray is dynamically sized, writes to this TensorArray may resize the object. The gradient TensorArray is statically sized based on the size of the forward TensorArray when this operation executes. Furthermore, the size of the forward TensorArray is frozen by this call. As a result, the flow is used to ensure that the call to generate the gradient TensorArray only happens after all writes are executed.

In the case of dynamically sized TensorArrays, gradient computation should only be performed on read operations that have themselves been chained via flow to occur only after all writes have executed. That way the final size of the forward TensorArray is known when this operation is called.

  • **A note about the source attribute:**

TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counts and generally breaks the TensorArray gradient flow.

The solution is to identify which gradient call this particular TensorArray gradient is being called in. This is performed by identifying a unique string (e.g. "gradients", "gradients_1", ...) from the input gradient Tensor's name. This string is used as a suffix when creating the TensorArray gradient object here (the attribute source).

The attribute source is added as a suffix to the forward TensorArray's name when performing the creation / lookup, so that each separate gradient calculation gets its own TensorArray accumulator.

cast Source

Arguments

:: (TensorType srcT, TensorType dstT) 
=> Tensor v1 srcT

x

-> Tensor Value dstT

y

Cast x of type SrcT to y of DstT.

erf Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Computes the Gauss error function of x element-wise.

batchMatrixTriangularSolve Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

matrix

-> Tensor v2 t

rhs

-> Tensor Value t

output

resourceScatterAdd Source

Arguments

:: (TensorType dtype, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> ResourceHandle dtype

resource: Should be from a Variable node.

-> Tensor v2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v3 dtype

updates: A tensor of updated values to add to ref.

-> Build ControlNode 

Adds sparse updates to the variable referenced by resource.

This operation computes

# Scalar indices
ref[indices, ...] += updates[...]

# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions add.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

(Figure: images/ScatterAdd.png)
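
For intuition, a minimal sketch of these semantics for a rank-1 ref and scalar updates in plain Haskell; Data.Vector's accum plays the role of the duplicate-friendly accumulation:

```haskell
import qualified Data.Vector as V

-- Scatter-add on a rank-1 "ref": duplicate indices accumulate.
scatterAdd :: Num a => V.Vector a -> [(Int, a)] -> V.Vector a
scatterAdd ref updates = V.accum (+) ref updates

main :: IO ()
main = print (scatterAdd (V.replicate 4 (0 :: Int)) [(1, 10), (3, 2), (1, 5)])
-- [0,15,0,2]
```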

batchCholeskyGrad Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

l

-> Tensor v2 t

grad

-> Tensor Value t

output

batchMatrixInverse Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input

-> Tensor Value t

output

refIdentity Source

Arguments

:: TensorType t 
=> Tensor Ref t

input

-> Build (Tensor Ref t)

output

Return the same ref tensor as the input ref tensor.

svd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Double, Float]` t) 
=> Tensor v1 t

input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Let P be the minimum of M and N.

-> (Tensor Value t, Tensor Value t, Tensor Value t)

(s, u, v)

  • s: Singular values. Shape is `[..., P]`.
  • u: Left singular vectors. If full_matrices is False then shape is `[..., M, P]`; if full_matrices is True then shape is `[..., M, M]`. Undefined if compute_uv is False.
  • v: Right singular vectors. If full_matrices is False then shape is `[..., N, P]`. If full_matrices is True then shape is `[..., N, N]`. Undefined if compute_uv is False.

Computes the singular value decompositions of one or more matrices.

Computes the SVD of each inner matrix in input such that `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

```prettyprint
# a is a tensor containing a batch of matrices.
# s is a tensor of singular values for each matrix.
# u is the tensor containing the left singular vectors for each matrix.
# v is the tensor containing the right singular vectors for each matrix.
s, u, v = svd(a)
s, _, _ = svd(a, compute_uv=False)
```

matrixSolveLs Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

matrix: Shape is `[..., M, N]`.

-> Tensor v2 t

rhs: Shape is `[..., M, K]`.

-> Tensor v3 Double

l2_regularizer: Scalar tensor.

(numpy compatibility: equivalent to np.linalg.lstsq)

-> Tensor Value t

output: Shape is `[..., N, K]`.

Solves one or more linear least-squares problems.

matrix is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. rhs is a tensor of shape `[..., M, K]`. The output is a tensor of shape `[..., N, K]` where each output matrix solves each of the equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares sense.

Below we use the following notation for each pair of matrix and right-hand sides in the batch:

matrix=\(A \in \Re^{m \times n}\), rhs=\(B \in \Re^{m \times k}\), output=\(X \in \Re^{n \times k}\), l2_regularizer=\(\lambda\).

If fast is True, then the solution is computed by solving the normal equations using Cholesky decomposition. Specifically, if \(m \ge n\) then \(X = (A^T A + \lambda I)^{-1} A^T B\), which solves the least-squares problem \(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 + \lambda ||Z||_F^2\). If \(m < n\) then output is computed as \(X = A^T (A A^T + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the minimum-norm solution to the under-determined linear system, i.e. \(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2\), subject to \(A Z = B\). Notice that the fast path is only numerically stable when \(A\) is numerically full rank and has a condition number \(\mathrm{cond}(A) < \frac{1}{\sqrt{\epsilon_{mach}}}\) or \(\lambda\) is sufficiently large.

If fast is False an algorithm based on the numerically robust complete orthogonal decomposition is used. This computes the minimum-norm least-squares solution, even when \(A\) is rank deficient. This path is typically 6-7 times slower than the fast path. If fast is False then l2_regularizer is ignored.

pack Source

Arguments

:: TensorType t 
=> [Tensor v1 t]

values: Must be of same shape and type.

-> Tensor Value t

output: The packed tensor.

Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

Packs the N tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of tensors of shape `(A, B, C)`;

if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`.
Etc.

For example:

```prettyprint
# x is [1, 4]
# y is [2, 5]
# z is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```

This is the opposite of unpack.
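
A hedged usage sketch in Haskell, assuming the constant / runSession / run helpers from TensorFlow.Ops and TensorFlow.Core as used in this library's examples; the fetched vector is the row-major flattening of the `[3, 2]` output:

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as TF

-- Packs three 2-element vectors along the first dimension.
main :: IO ()
main = do
  result <- TF.runSession $ do
    let x = TF.constant (TF.Shape [2]) [1, 4 :: Float]
        y = TF.constant (TF.Shape [2]) [2, 5]
        z = TF.constant (TF.Shape [2]) [3, 6]
    TF.run (CoreOps.pack [x, y, z])
  print (result :: V.Vector Float)  -- [1.0,4.0,2.0,5.0,3.0,6.0]
```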

barrierClose Source

Arguments

:: Tensor Ref ByteString

handle: The handle to a barrier.

-> Build ControlNode 

Closes the given barrier.

This operation signals that no more new elements will be inserted in the given barrier. Subsequent InsertMany that try to introduce a new key will fail. Subsequent InsertMany operations that just add missing components to already existing elements will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient completed elements remain in the barrier. Subsequent TakeMany operations that would block will fail immediately.

selfAdjointEigV2 Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input: Tensor input of shape `[N, N]`.

-> (Tensor Value t, Tensor Value t)

(e, v)

  • e: Eigenvalues. Shape is `[N]`.
  • v: Eigenvectors. Shape is `[N, N]`.

Computes the eigen decomposition of one or more square self-adjoint matrices.

Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in input such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.

```prettyprint
# a is a tensor.
# e is a tensor of eigenvalues.
# v is a tensor of eigenvectors.
e, v = self_adjoint_eig(a)
e = self_adjoint_eig(a, compute_v=False)
```

scatterSub Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v3 t

updates: A tensor of updated values to subtract from ref.

-> Build (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Subtracts sparse updates from a variable reference.

# Scalar indices
ref[indices, ...] -= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] -= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their (negated) contributions add.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

(Figure: images/ScatterSub.png)

selfAdjointEig Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input: Shape is `[..., M, M]`.

-> Tensor Value t

output: Shape is `[..., M+1, M]`.

Computes the Eigen Decomposition of a batch of square self-adjoint matrices.

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix SelfAdjointEig.

The result is a `[..., M+1, M]` matrix with `[..., 0, :]` containing the eigenvalues, and subsequent `[..., 1:, :]` containing the eigenvectors.

stopGradient Source

Arguments

:: TensorType t 
=> Tensor v1 t

input

-> Tensor Value t

output

Stops gradient computation.

When executed in a graph, this op outputs its input tensor as-is.

When building ops to compute gradients, this op prevents the contribution of its inputs from being taken into account. Normally, the gradient generator adds ops to a graph to compute the derivatives of a specified loss by recursively finding out inputs that contributed to its computation. If you insert this op in the graph, its inputs are masked from the gradient generator. They are not taken into account for computing gradients.

This is useful any time you want to compute a value with TensorFlow but need to pretend that the value was a constant. Some examples include:

  • The *EM* algorithm where the *M-step* should not involve backpropagation through the output of the *E-step*.
  • Contrastive divergence training of Boltzmann machines where, when differentiating the energy function, the training must not backpropagate through the graph that generated the samples from the model.
  • Adversarial training, where no backprop should happen through the adversarial example generation process.

argMax Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

input

-> Tensor v2 tidx

dimension: int32, 0 <= dimension < rank(input). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.

-> Tensor Value Int64

output

Returns the index with the largest value across dimensions of a tensor.

choleskyGrad Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

-> Tensor v2 t

grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.

-> Tensor Value t

output: Symmetrized version of df/dA. Shape is `[..., M, M]`.

Computes the reverse mode backpropagated gradient of the Cholesky algorithm.

For an explanation see "Differentiation of the Cholesky algorithm" by Iain Murray, http://arxiv.org/abs/1602.07527.

sparseReshape Source

Arguments

:: Tensor v1 Int64

input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a SparseTensor.

-> Tensor v2 Int64

input_shape: 1-D. R_in vector with the input SparseTensor's dense shape.

-> Tensor v3 Int64

new_shape: 1-D. R_out vector with the requested new dense shape.

-> (Tensor Value Int64, Tensor Value Int64)

(output_indices, output_shape)

  • output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty values in the output SparseTensor.
  • output_shape: 1-D. R_out vector with the full dense shape of the output SparseTensor. This is the same as new_shape but with any -1 dimensions filled in.

Reshapes a SparseTensor to represent values in a new dense shape.

This operation has the same semantics as reshape on the represented dense tensor. The input_indices are recomputed based on the requested new_shape.

If one component of new_shape is the special value -1, the size of that dimension is computed so that the total dense size remains constant. At most one component of new_shape can be -1. The number of dense elements implied by new_shape must be the same as the number of dense elements originally implied by input_shape.

Reshaping does not affect the order of values in the SparseTensor.

If the input tensor has rank R_in and N non-empty values, and new_shape has length R_out, then input_indices has shape `[N, R_in]`, input_shape has length R_in, output_indices has shape `[N, R_out]`, and output_shape has length R_out.
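
A sketch of the -1 handling in plain Haskell; inferShape is a hypothetical helper for illustration, not part of this library:

```haskell
-- Fill in at most one -1 dimension of the requested shape so the total
-- number of dense elements is preserved; Nothing if the shapes conflict.
inferShape :: [Int] -> [Int] -> Maybe [Int]
inferShape inputShape newShape =
    case filter (\d -> d == -1) newShape of
      []  | product newShape == total -> Just newShape
      [_] | known /= 0, total `mod` known == 0 ->
              Just [ if d == -1 then total `div` known else d | d <- newShape ]
      _   -> Nothing
  where
    total = product inputShape
    known = product (filter (\d -> d /= -1) newShape)

main :: IO ()
main = print (inferShape [2, 3, 4] [6, -1])  -- Just [6,4]
```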

sparseApplyAdadelta Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

accum_update: Should be from a Variable().

-> Tensor v4 t

lr: Learning rate. Must be a scalar.

-> Tensor v5 t

rho: Decay factor. Must be a scalar.

-> Tensor v6 t

epsilon: Constant factor. Must be a scalar.

-> Tensor v7 t

grad: The gradient.

-> Tensor v8 tindices

indices: A vector of indices into the first dimension of var and accum.

-> Build (Tensor Ref t)

out: Same as "var".

dilation2DBackpropFilter Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, in_height, in_width, depth]`.

-> Tensor v2 t

filter: 3-D with shape `[filter_height, filter_width, depth]`.

-> Tensor v3 t

out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.

-> Tensor Value t

filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`.

Computes the gradient of morphological 2-D dilation with respect to the filter.

batchSelfAdjointEigV2 Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input

-> (Tensor Value t, Tensor Value t)

(e, v)

  • e
  • v

barrierIncompleteSize Source

Arguments

:: Tensor Ref ByteString

handle: The handle to a barrier.

-> Build (Tensor Value Int32)

size: The number of incomplete elements (i.e. those with some of their value components not set) in the barrier.

Computes the number of incomplete elements in the given barrier.

fakeQuantWithMinMaxVars Source

Arguments

:: Tensor v1 Float

inputs

-> Tensor v2 Float

min

-> Tensor v3 Float

max

-> Tensor Value Float

outputs

Fake-quantize the inputs tensor of type float and shape `[b, h, w, d]` via global float scalars min and max to outputs tensor of same shape as inputs.

`[min; max]` is the clamping range for the inputs data. The op divides this range into 255 steps (total of 256 values), then replaces each inputs value with the closest of the quantized step values.

This operation has a gradient and thus allows for training min and max values.

readVariableOp Source

Arguments

:: TensorType dtype 
=> ResourceHandle dtype

resource: handle to the resource in which to store the variable.

-> Build (Tensor Value dtype)

value

Reads the value of a variable.

The tensor returned by this operation is immutable.

The value returned by this operation is guaranteed to be influenced by all the writes on which this operation depends directly or indirectly, and to not be influenced by any of the writes which depend directly or indirectly on this operation.

fusedBatchNormGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

y_backprop: A 4D Tensor for the gradient with respect to y.

-> Tensor v2 t

x: A 4D Tensor for input data.

-> Tensor v3 t

scale: A 1D Tensor for scaling factor, to scale the normalized x.

-> Tensor v4 t

reserve_space_1: A 1D Tensor for the computed batch mean, to be reused in the gradient computation.

-> Tensor v5 t

reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance in the cuDNN case), to be used in the gradient computation.

-> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)

(x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4)

  • x_backprop: A 4D Tensor for the gradient with respect to x.
  • scale_backprop: A 1D Tensor for the gradient with respect to scale.
  • offset_backprop: A 1D Tensor for the gradient with respect to offset.
  • reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
  • reserve_space_4: Unused placeholder to match the variance input in FusedBatchNorm.

Gradient for batch normalization.

Note that the size of 4D Tensors is defined by either NHWC or NCHW. The size of 1D Tensors matches the dimension C of the 4D Tensors.

paddingFIFOQueue Source

Arguments

:: Build (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that produces elements in first-in first-out order.

Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum size of any given element in the minibatch. See below for details.

matrixInverse Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input: Shape is `[..., M, M]`.

-> Tensor Value t

output: Shape is `[..., M, M]`.

(numpy compatibility: equivalent to np.linalg.inv)

Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the inverse for all input submatrices `[..., :, :]`.

The op uses LU decomposition with partial pivoting to compute the inverses.

If a matrix is not invertible there is no guarantee what the op does. It may detect the condition and raise an exception or it may simply return a garbage result.

audioSummaryV2 Source

Arguments

:: Tensor v1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v2 Float

tensor: 2-D of shape `[batch_size, frames]`.

-> Tensor v3 Float

sample_rate: The sample rate of the signal in hertz.

-> Tensor Value ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with audio.

The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.

The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

  • If max_outputs is 1, the summary value tag is '*tag*/audio'.
  • If max_outputs is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

matrixDeterminant Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input: Shape is `[..., M, M]`.

-> Tensor Value t

output: Shape is `[...]`.

Computes the determinant of one or more square matrices.

The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor containing the determinants for all input submatrices `[..., :, :]`.

writeFile Source

Arguments

:: Tensor v1 ByteString

filename: scalar. The name of the file to which we write the contents.

-> Tensor v2 ByteString

contents: scalar. The content to be written to the output file.

-> ControlNode 

Writes contents to the file at input filename. Creates file if not existing.

quantizedConcat Source

Arguments

:: TensorType t 
=> Tensor v1 Int32

concat_dim: 0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)).

-> [Tensor v2 t]

values: The N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.

-> [Tensor v3 Float]

input_mins: The minimum scalar values for each of the input tensors.

-> [Tensor v4 Float]

input_maxes: The maximum scalar values for each of the input tensors.

-> (Tensor Value t, Tensor Value Float, Tensor Value Float)

(output, output_min, output_max)

  • output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except in concat_dim where it has the sum of the sizes.
  • output_min: The float value that the minimum quantized output value represents.
  • output_max: The float value that the maximum quantized output value represents.

Concatenates quantized tensors along one dimension.

varHandleOp Source

Arguments

:: TensorType dtype 
=> Shape

shape: The (possibly partially specified) shape of this variable.

-> Build (ResourceHandle dtype)

resource

Creates a handle to a Variable resource.

stridedSliceAssign Source

Arguments

:: (TensorType t, TensorType index, OneOf `[Int32, Int64]` index) 
=> Tensor Ref t

ref

-> Tensor v2 index

begin

-> Tensor v3 index

end

-> Tensor v4 index

strides

-> Tensor v5 t

value

-> Build (Tensor Ref t)

output_ref

Assign value to the sliced l-value reference of ref.

The values of value are assigned to the positions in the variable ref that are selected by the slice parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as in StridedSlice.

NOTE this op currently does not support broadcasting and so value's shape must be exactly the shape produced by the slice of ref.

varIsInitializedOp Source

Arguments

:: ResourceHandle dtype

resource: the input resource handle.

-> Build (Tensor Value Bool)

is_initialized: a scalar boolean which is true if the variable has been initialized.

Checks whether a resource handle-based variable has been initialized.

sparseApplyRMSProp Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

ms: Should be from a Variable().

-> Tensor Ref t

mom: Should be from a Variable().

-> Tensor v4 t

lr: Scaling factor. Must be a scalar.

-> Tensor v5 t

rho: Decay rate. Must be a scalar.

-> Tensor v6 t

momentum

-> Tensor v7 t

epsilon: Ridge term. Must be a scalar.

-> Tensor v8 t

grad: The gradient.

-> Tensor v9 tindices

indices: A vector of indices into the first dimension of var, ms and mom.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the RMSProp algorithm.

Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.

mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
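
One scalar step of these updates in plain Haskell; the lr, rho, momentum and epsilon values below are made up for illustration:

```haskell
-- A single RMSProp step on one parameter, following the update rules above.
rmspropStep :: Double -> Double -> Double -> Double
            -> (Double, Double, Double) -> Double -> (Double, Double, Double)
rmspropStep lr rho momentum epsilon (var, ms, mom) grad =
    (var - mom', ms', mom')
  where
    ms'  = rho * ms + (1 - rho) * grad * grad
    mom' = momentum * mom + lr * grad / sqrt (ms' + epsilon)

main :: IO ()
main = print (rmspropStep 0.01 0.9 0.9 1e-10 (1.0, 0.0, 0.0) 0.5)
```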

batchCholesky Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input

-> Tensor Value t

output

tensorArrayGather Source

Arguments

:: TensorType dtype 
=> Tensor Ref ByteString

handle

-> Tensor v2 Int32

indices

-> Tensor v3 Float

flow_in

-> Build (Tensor Value dtype)

value

readerRestoreState Source

Arguments

:: Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor v2 ByteString

state: Result of a ReaderSerializeState of a Reader with type matching reader_handle.

-> Build ControlNode 

Restore a reader to a previously saved state.

Not all Readers support being restored, so this can produce an Unimplemented error.

sqrtGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Computes the gradient for the sqrt of x wrt its input.

Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and dy is the corresponding input gradient.

split Source

Arguments

:: TensorType t 
=> Int64

num_split: The number of ways to split. Must evenly divide `value.shape[split_dim]`.

-> Tensor v1 Int32

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`.

-> Tensor v2 t

value: The tensor to split.

-> [Tensor Value t]

output: They are identically shaped tensors, whose shape matches that of value except along split_dim, where their sizes are `value.shape[split_dim] / num_split`.

Splits a tensor into num_split tensors along one dimension.

textLineReader Source

Arguments

:: Build (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the lines of a file delimited by '\n'.

matrixBandPart Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: Rank k tensor.

-> Tensor v2 Int64

num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle.

-> Tensor v3 Int64

num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle.

-> Tensor Value t

band: Rank k tensor of the same shape as input. The extracted banded tensor.

Copy a tensor setting everything outside a central band in each innermost matrix to zero.

The band part is computed as follows: Assume input has k dimensions `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where

`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.

The indicator function

`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper)`.

For example:

```prettyprint
# if input is [[ 0,  1,  2, 3]
               [-1,  0,  1, 2]
               [-2, -1,  0, 1]
               [-3, -2, -1, 0]],

tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
                                       [-1,  0,  1, 2]
                                       [ 0, -1,  0, 1]
                                       [ 0,  0, -1, 0]],

tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
                                      [-1,  0,  1, 0]
                                      [-2, -1,  0, 1]
                                      [ 0, -2, -1, 0]]
```

Useful special cases:

```prettyprint
tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
tf.matrix_band_part(input, 0, 0)  ==> Diagonal.
```
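
The indicator function and the first example above can be reproduced in plain Haskell (illustration only, operating on lists of lists rather than tensors):

```haskell
-- The in_band predicate from the description above.
inBand :: Int -> Int -> Int -> Int -> Bool
inBand numLower numUpper m n =
    (numLower < 0 || (m - n) <= numLower) && (numUpper < 0 || (n - m) <= numUpper)

-- Zero out everything outside the band of a single matrix.
bandPart :: Num a => Int -> Int -> [[a]] -> [[a]]
bandPart numLower numUpper input =
    [ [ if inBand numLower numUpper m n then x else 0
      | (n, x) <- zip [0 ..] row ]
    | (m, row) <- zip [0 ..] input ]

main :: IO ()
main = mapM_ print
    (bandPart 1 (-1) [[0, 1, 2, 3], [-1, 0, 1, 2], [-2, -1, 0, 1], [-3, -2, -1, 0]])
-- [0,1,2,3], [-1,0,1,2], [0,-1,0,1], [0,0,-1,0]
```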

queueClose Source

Arguments

:: Tensor Ref ByteString

handle: The handle to a queue.

-> Build ControlNode 

Closes the given queue.

This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.

mergeV2Checkpoints Source

Arguments

:: Tensor v1 ByteString

checkpoint_prefixes: prefixes of V2 checkpoints to merge.

-> Tensor v2 ByteString

destination_prefix: scalar. The desired final prefix. Allowed to be the same as one of the checkpoint_prefixes.

-> ControlNode 

V2 format specific: merges the metadata files of sharded checkpoints. The result is one logical checkpoint, with one physical metadata file and renamed data files.

Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.

If delete_old_dirs is true, attempts to delete recursively the dirname of each path in the input checkpoint_prefixes. This is useful when those paths are non-user-facing temporary locations.

barrierReadySize Source

Arguments

:: Tensor Ref ByteString

handle: The handle to a barrier.

-> Build (Tensor Value Int32)

size: The number of complete elements (i.e. those with all of their value components set) in the barrier.

Computes the number of complete elements in the given barrier.

randomShuffleQueue Source

Arguments

:: Build (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that randomizes the order of elements.

notEqual Source

Returns the truth value of (x != y) element-wise.

  • *NOTE*: NotEqual supports broadcasting. More about broadcasting here.

nonMaxSuppression Source

Arguments

:: Tensor v1 Float

boxes: A 2-D float tensor of shape `[num_boxes, 4]`.

-> Tensor v2 Float

scores: A 1-D float tensor of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes).

-> Tensor v3 Int32

max_output_size: A scalar integer tensor representing the maximum number of boxes to be selected by non max suppression.

-> Tensor Value Int32

selected_indices: A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.

Greedily selects a subset of bounding boxes in descending order of score, pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners, and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system and is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected.

The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:

```prettyprint
selected_indices = tf.image.non_max_suppression(
    boxes, scores, max_output_size, iou_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
```
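
For intuition, here is the greedy selection over plain Haskell tuples; the box layout and the iouThreshold parameter follow the description above, and everything else is a simplification rather than this op's implementation:

```haskell
import Data.List (sortBy)
import Data.Ord (Down (..), comparing)

-- Boxes follow the [y1, x1, y2, x2] convention described above.
type Box = (Double, Double, Double, Double)

area :: Box -> Double
area (y1, x1, y2, x2) = max 0 (y2 - y1) * max 0 (x2 - x1)

-- Intersection-over-union of two boxes.
iou :: Box -> Box -> Double
iou a@(ay1, ax1, ay2, ax2) b@(by1, bx1, by2, bx2)
    | union <= 0 = 0
    | otherwise  = inter / union
  where
    inter = area (max ay1 by1, max ax1 bx1, min ay2 by2, min ax2 bx2)
    union = area a + area b - inter

-- Greedy selection: take boxes in descending score order, skipping any
-- box that overlaps an already-selected box by more than the threshold.
nonMaxSuppression :: Double -> Int -> [(Box, Double)] -> [Box]
nonMaxSuppression iouThreshold maxOutput scored = go [] ranked
  where
    ranked = map fst (sortBy (comparing (Down . snd)) scored)
    go kept [] = reverse kept
    go kept (b : rest)
        | length kept >= maxOutput = reverse kept
        | all (\k -> iou b k <= iouThreshold) kept = go (b : kept) rest
        | otherwise = go kept rest

main :: IO ()
main = mapM_ print
    (nonMaxSuppression 0.5 3 [((0,0,1,1), 0.9), ((0,0,1.1,1.1), 0.8), ((2,2,3,3), 0.7)])
```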

tensorArrayWrite Source

Arguments

:: TensorType t 
=> Tensor Ref ByteString

handle

-> Tensor v2 Int32

index

-> Tensor v3 t

value

-> Tensor v4 Float

flow_in

-> Build (Tensor Value Float)

flow_out

quantizeAndDequantize Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input: Tensor to quantize and then dequantize.

-> Tensor Value t

output

Quantizes then dequantizes a tensor.

This op simulates the precision loss from the quantized forward pass by:

  1. Quantizing the tensor to fixed point numbers, which should match the target quantization method when it is used in inference.
  2. Dequantizing it back to floating point numbers for the following ops, most likely matmul.

There are different ways to quantize. This version does not use the full range of the output type, choosing to elide the lowest possible value for symmetry (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to 0.

To perform this op, we first find the range of values in our tensor. The range we use is always centered on 0, so we find m such that

  1. m = max(abs(input_min), abs(input_max)) if range_given is true,
  2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.

Our input tensor range is then [-m, m].

Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`. If signed_input is true, this is

`[min_fixed, max_fixed] = [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]`.

Otherwise, if signed_input is false, the fixed-point range is

`[min_fixed, max_fixed] = [0, (1 << num_bits) - 1]`.

From this we compute our scaling factor, s:

s = (max_fixed - min_fixed) / (2 * m).

Now we can quantize and dequantize the elements of our tensor. An element e is transformed into e':

e' = (e * s).round_to_nearest() / s.

Note that we have a different number of buckets in the signed vs. unsigned cases. For example, if num_bits == 8, we get 254 buckets in the signed case vs. 255 in the unsigned case.

For example, suppose num_bits = 8 and m = 1. Then

`[min_fixed, max_fixed] = [-127, 127]`, and s = (127 + 127) / 2 = 127.

Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
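
The same arithmetic in plain Haskell; note that the round-to-nearest tie at -63.5 may land on -64 under GHC's banker's rounding where the text above shows -63:

```haskell
-- Symmetric num_bits = 8 quantize-then-dequantize with m = 1, so s = 127.
quantDequant :: Double -> Double
quantDequant e = fromIntegral (round (e * s) :: Integer) / s
  where
    s = 127

main :: IO ()
main = print (map quantDequant [-1, -0.5, 0, 0.3])
-- -0.5 * 127 = -63.5 is a tie (GHC rounds it to -64); 0.3 * 127 = 38.1 rounds to 38.
```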

readerRead Source

Arguments

:: Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor Ref ByteString

queue_handle: Handle to a Queue, with string work items.

-> Build (Tensor Value ByteString, Tensor Value ByteString)

(key, value)

  • key: A scalar.
  • value: A scalar.

Returns the next record (key, value pair) produced by a Reader.

Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).

matrixTriangularSolve Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

matrix: Shape is `[..., M, M]`.

-> Tensor v2 t

rhs: Shape is `[..., M, K]`.

-> Tensor Value t

output: Shape is `[..., M, K]`.

Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.

matrix is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. If lower is True then the strictly upper triangular part of each inner-most matrix is assumed to be zero and not accessed. If lower is False then the strictly lower triangular part of each inner-most matrix is assumed to be zero and not accessed. rhs is a tensor of shape `[..., M, K]`.

The output is a tensor of shape `[..., M, K]`. If adjoint is False then the innermost matrices in output satisfy matrix equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If adjoint is True then the innermost matrices in output satisfy matrix equations `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.

tensorArraySplitV2 Source

Arguments

:: TensorType t 
=> Tensor v1 ByteString

handle: The handle to a TensorArray.

-> Tensor v2 t

value: The concatenated tensor to write to the TensorArray.

-> Tensor v3 Int64

lengths: The vector of lengths, how to split the rows of value into the TensorArray.

-> Tensor v4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> Tensor Value Float

flow_out: A float scalar that enforces proper chaining of operations.

Split the data from the input value into TensorArray elements.

Assuming that lengths takes on values

```(n0, n1, ..., n(T-1))```

and that value has shape

```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,

this splits values into a TensorArray with T tensors.

TensorArray index t will be the subtensor of values with starting position

```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

and having size

```nt x d0 x d1 x ...```

restore Source

Arguments

:: TensorType dt 
=> Tensor v1 ByteString

file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

-> Tensor v2 ByteString

tensor_name: Must have a single element. The name of the tensor to be restored.

-> Tensor Value dt

tensor: The restored tensor.

Restores a tensor from checkpoint files.

Reads a tensor stored in one or several files. If there are several files (for instance because a tensor was saved as slices), file_pattern may contain wildcard symbols (* and ?) in the filename portion only, not in the directory portion.

If a file_pattern matches several files, preferred_shard can be used to hint in which file the requested tensor is likely to be found. This op will first open the file at index preferred_shard in the list of matching files and try to restore tensors from that file. Only if some tensors or tensor slices are not found in that first file, then the Op opens all the files. Setting preferred_shard to match the value passed as the shard input of a matching Save Op may speed up Restore. This attribute only affects performance, not correctness. The default value -1 means files are processed in order.

See also RestoreSlice.

quantizedReluX Source

Arguments

:: (TensorType tinput, OneOf `[Int16, Int32, Word16, Word8]` tinput, TensorType out_type, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v1 tinput

features

-> Tensor v2 Float

max_value

-> Tensor v3 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v4 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`

accumulatorTakeGradient Source

Arguments

:: (TensorType dtype, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v2 Int32

num_required: Number of gradients required before we return an aggregate.

-> Build (Tensor Value dtype)

average: The average of the accumulated gradients.

Extracts the average gradient in the given ConditionalAccumulator, provided that sufficient (i.e., more than num_required) gradients have been accumulated. The op blocks until sufficient gradients have been accumulated. If the accumulator has already aggregated more than num_required gradients, it returns the average of the accumulated gradients. Also automatically increments the recorded global_step in the accumulator by 1, and resets the aggregate to 0.

floorMod Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns element-wise remainder of division. When `x < 0` xor `y < 0` is true, this follows Python semantics in that the result here is consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.

  • *NOTE*: FloorMod supports broadcasting. More about broadcasting here.
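
Haskell's div/mod already follow these flooring semantics (with quot/rem as the truncating counterparts), so the identity is easy to verify:

```haskell
-- floor(x / y) * y + mod(x, y) == x, with flooring division and remainder.
main :: IO ()
main = do
  print ((-7) `mod` 5)                     -- 3 (flooring remainder, like FloorMod)
  print ((-7) `rem` 5)                     -- -2 (truncating remainder)
  print ((-7) `div` 5 * 5 + (-7) `mod` 5)  -- -7 (the identity above)
```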

matchingFiles Source

Arguments

:: Tensor v1 ByteString

pattern: A (scalar) shell wildcard pattern.

-> Tensor Value ByteString

filenames: A vector of matching filenames.

Returns the set of files matching a pattern.

Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion.

maxPool Source

Arguments

:: (TensorType t, OneOf `[Word16, Float]` t) 
=> Tensor v1 t

input: 4-D input to pool over.

-> Tensor Value t

output: The max pooled output tensor.

Performs max pooling on the input.

computeAccidentalHits Source

Arguments

:: Int64

num_true: Number of true labels per context.

-> Tensor v1 Int64

true_classes: The true_classes output of UnpackSparseLabels.

-> Tensor v2 Int64

sampled_candidates: The sampled_candidates output of CandidateSampler.

-> (Tensor Value Int32, Tensor Value Int64, Tensor Value Float)

(indices, ids, weights)

  • indices: A vector of indices corresponding to rows of true_candidates.
  • ids: A vector of IDs of positions in sampled_candidates that match a true_label for the row with the corresponding index in indices.
  • weights: A vector of the same length as indices and ids, in which each element is -FLOAT_MAX.

Computes the ids of the positions in sampled_candidates that match true_labels.

When doing log-odds NCE, the result of this op should be passed through a SparseToDense op, then added to the logits of the sampled candidates. This has the effect of removing the sampled labels that match the true labels by making the classifier sure that they are sampled labels.

deserializeManySparse Source

Arguments

:: TensorType dtype 
=> Tensor v1 ByteString

serialized_sparse: 2-D, The N serialized SparseTensor objects. Must have 3 columns.

-> (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

(sparse_indices, sparse_values, sparse_shape)

  • sparse_indices
  • sparse_values
  • sparse_shape

Deserialize and concatenate SparseTensors from a serialized minibatch.

The input serialized_sparse must be a string matrix of shape `[N x 3]` where N is the minibatch size and the rows correspond to packed outputs of SerializeSparse. The ranks of the original SparseTensor objects must all match. When the final SparseTensor is created, it has rank one higher than the ranks of the incoming SparseTensor objects (they have been concatenated along a new row dimension).

The output SparseTensor object's shape values for all dimensions but the first are the max across the input SparseTensor objects' shape values for the corresponding dimensions. Its first shape value is N, the minibatch size.

The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run SparseReorder to restore index ordering.

For example, if the serialized input is a `[2 x 3]` matrix representing two original SparseTensor objects:

index = [ 0]
        [10]
        [20]
values = [1, 2, 3]
shape = [50]

and

index = [ 2]
        [10]
values = [4, 5]
shape = [30]

then the final deserialized SparseTensor will be:

index = [0  0]
        [0 10]
        [0 20]
        [1  2]
        [1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]

cropAndResize Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both image_height and image_width need to be positive.

-> Tensor v2 Float

boxes: A 2-D tensor of shape `[num_boxes, 4]`. The i-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of y is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use extrapolation_value to extrapolate the input image values.

-> Tensor v3 Int32

box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the i-th box refers to.

-> Tensor v4 Int32

crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.

-> Tensor Value Float

crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

Extracts crops from the input image tensor and bilinearly resizes them (possibly with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op, which extracts a fixed-size slice from the input image and does not allow resizing or aspect ratio change.

Returns a tensor with crops from the input image at positions defined at the bounding box locations in boxes. The cropped boxes are all resized (with bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.

scatterUpdate Source

Arguments

:: (TensorType t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v3 t

updates: A tensor of updated values to store in ref.

-> Build (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Applies sparse updates to a variable reference.

This operation computes

# Scalar indices
ref[indices, ...] = updates[...]

# Vector indices (for each i)
ref[indices[i], ...] = updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

If values in ref are to be updated more than once, because there are duplicate entries in indices, the order in which the updates happen for each value is undefined.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

(Figure: images/ScatterUpdate.png)

randomGamma Source

Arguments

:: (TensorType s, OneOf `[Int32, Int64]` s, TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 s

shape: 1-D integer tensor. Shape of independent samples to draw from each distribution described by the shape parameters given in alpha.

-> Tensor v2 t

alpha: A tensor in which each scalar is a "shape" parameter describing the associated gamma distribution.

-> Build (Tensor Value t)

output: A tensor with shape `shape + shape(alpha)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.

Outputs random values from the Gamma distribution(s) described by alpha.

This op uses the algorithm by Marsaglia et al. to acquire samples via transformation-rejection from pairs of uniform and normal random variables. See http://dl.acm.org/citation.cfm?id=358414

batchMatrixSolve Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

matrix

-> Tensor v2 t

rhs

-> Tensor Value t

output

batchMatrixBandPart Source

Arguments

:: TensorType t 
=> Tensor v1 t

input

-> Tensor v2 Int64

num_lower

-> Tensor v3 Int64

num_upper

-> Tensor Value t

band

all Source

Arguments

:: (TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 Bool

input: The tensor to reduce.

-> Tensor v2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Value Bool

output: The reduced tensor.

Computes the "logical and" of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

readerNumRecordsProduced Source

Arguments

:: Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Build (Tensor Value Int64)

records_produced

Returns the number of records this Reader has produced.

This is the same as the number of ReaderRead executions that have succeeded.

stackPop Source

Arguments

:: TensorType elem_type 
=> Tensor Ref ByteString

handle: The handle to a stack.

-> Build (Tensor Value elem_type)

elem: The tensor that is popped from the top of the stack.

Pop the element at the top of the stack.

tensorArrayScatterV2 Source

Arguments

:: TensorType t 
=> Tensor v1 ByteString

handle: The handle to a TensorArray.

-> Tensor v2 Int32

indices: The locations at which to write the tensor elements.

-> Tensor v3 t

value: The concatenated tensor to write to the TensorArray.

-> Tensor v4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> Tensor Value Float

flow_out: A float scalar that enforces proper chaining of operations.

Scatter the data from the input value into specific TensorArray elements.

indices must be a vector, its length must match the first dim of value.

rGBToHSV Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.

-> Tensor Value t

output: images converted to HSV.

Converts one or more images from RGB to HSV.

Outputs a tensor of the same shape as the images tensor, containing the HSV value of the pixels. The output is only well defined if the values in images are in `[0,1]`.

`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.

serializeManySparse Source

Arguments

:: TensorType t 
=> Tensor v1 Int64

sparse_indices: 2-D. The indices of the minibatch SparseTensor.

-> Tensor v2 t

sparse_values: 1-D. The values of the minibatch SparseTensor.

-> Tensor v3 Int64

sparse_shape: 1-D. The shape of the minibatch SparseTensor.

-> Tensor Value ByteString

serialized_sparse

Serialize an N-minibatch SparseTensor into an `[N, 3]` string Tensor.

The SparseTensor must have rank R greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the SparseTensor must be sorted in increasing order of this first dimension. The serialized SparseTensor objects going into each row of serialized_sparse will have rank `R-1`.

The minibatch size N is extracted from `sparse_shape[0]`.

initializeTableFromTextFile Source

Arguments

:: Int64

key_index: Column index in a line to get the table key values from.

-> Int64

value_index: Column index that represents information of a line to get the table value values from.

-> Tensor Ref ByteString

table_handle: Handle to a table which will be initialized.

-> Tensor v2 ByteString

filename: Filename of a vocabulary text file.

-> Build ControlNode 

Initializes a table from a text file.

It inserts one key-value pair into the table for each line of the file. The key and value are extracted from the whole line content, from elements of the split line based on delimiter, or from the line number (starting from zero). Where to extract the key and value from a line is specified by key_index and value_index.

  • A value of -1 means use the line number (starting from zero); expects int64.
  • A value of -2 means use the whole line content; expects string.
  • A value >= 0 means use the index (starting at zero) of the split line based on delimiter.

decodePng Source

Arguments

:: (TensorType dtype, OneOf `[Word16, Word8]` dtype) 
=> Tensor v1 ByteString

contents: 0-D. The PNG-encoded image.

-> Tensor Value dtype

image: 3-D with shape `[height, width, channels]`.

Decode a PNG-encoded image to a uint8 or uint16 tensor.

The attr channels indicates the desired number of color channels for the decoded image.

Accepted values are:

  • 0: Use the number of channels in the PNG-encoded image.
  • 1: output a grayscale image.
  • 3: output an RGB image.
  • 4: output an RGBA image.

If needed, the PNG-encoded image is transformed to match the requested number of color channels.

tensorArraySizeV2 Source

Arguments

:: Tensor v1 ByteString

handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

-> Tensor v2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> Tensor Value Int32

size: The current size of the TensorArray.

Get the current size of the TensorArray.

div Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns x / y element-wise.

  • *NOTE*: Div supports broadcasting. More about broadcasting here.

logUniformCandidateSampler Source

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

Generates labels for candidate sampling with a log-uniform distribution.

See explanations of candidate sampling and the data formats at go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

barrier Source

Arguments

:: Build (Tensor Ref ByteString)

handle: The handle to the barrier.

Defines a barrier that persists across different graph executions.

A barrier represents a key-value map, where each key is a string, and each value is a tuple of tensors.

At runtime, the barrier contains complete and incomplete elements. A complete element has defined tensors for all components of its value tuple, and may be accessed using BarrierTakeMany. An incomplete element has some undefined components in its value tuple, and may be updated using BarrierInsertMany.

createVariableOp Source

Arguments

:: TensorType dtype 
=> ResourceHandle dtype

resource: handle to the resource in which to store the variable.

-> Tensor v2 dtype

value: the value to set the new tensor to use.

-> Build ControlNode 

Creates a variable resource.

accumulatorApplyGradient Source

Arguments

:: (TensorType dtype, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v2 Int64

local_step: The local_step value at which the gradient was computed.

-> Tensor v3 dtype

gradient: A tensor of the gradient to be accumulated.

-> Build ControlNode 

Applies a gradient to a given accumulator. Does not add if local_step is less than the accumulator's global_step.

randomStandardNormal Source

Arguments

:: (TensorType dtype, OneOf `[Word16, Double, Float]` dtype, TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 t

shape: The shape of the output tensor.

-> Build (Tensor Value dtype)

output: A tensor of the specified shape filled with random normal values.

Outputs random values from a normal distribution.

The generated values will have mean 0 and standard deviation 1.

parameterizedTruncatedNormal Source

Arguments

:: (TensorType dtype, OneOf `[Word16, Double, Float]` dtype, TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 t

shape: The shape of the output tensor. Batches are indexed by the 0th dimension.

-> Tensor v2 dtype

means: The mean parameter of each batch.

-> Tensor v3 dtype

stdevs: The standard deviation parameter of each batch. Must be greater than 0.

-> Tensor v4 dtype

minvals: The minimum cutoff. May be -infinity.

-> Tensor v5 dtype

maxvals: The maximum cutoff. May be +infinity, and must be more than the minval for each batch.

-> Build (Tensor Value dtype)

output: A matrix of shape num_batches x samples_per_batch, filled with random truncated normal values using the parameters for each row.

Outputs random values from a normal distribution. The parameters may each be a scalar which applies to the entire output, or a vector of length shape[0] which stores the parameters for each batch.

accumulatorSetGlobalStep Source

Arguments

:: Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v2 Int64

new_global_step: The new global_step value to set.

-> Build ControlNode 

Updates the accumulator with a new value for global_step. Logs a warning if the accumulator's value is already higher than new_global_step.

resizeBilinear Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Value Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

Resize images to size using bilinear interpolation.

Input images can be of different types but output images are always float.

quantizeV2 Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Word16, Word8]` t) 
=> Tensor v1 Float

input

-> Tensor v2 Float

min_range: The minimum scalar value possibly produced for the input.

-> Tensor v3 Float

max_range: The maximum scalar value possibly produced for the input.

-> (Tensor Value t, Tensor Value Float, Tensor Value Float)

(output, output_min, output_max)

  • output: The quantized data produced from the float input.
  • output_min: The actual minimum scalar value used for the output.
  • output_max: The actual maximum scalar value used for the output.

Quantize the input tensor of type float to output tensor of type T.

min_range, max_range are scalar floats that specify the range for the input data. The mode attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.

In MIN_COMBINED mode, each value of the tensor will undergo the following:

```
out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
if T == qint8, out[i] -= (range(T) + 1) / 2.0
```

here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

*MIN_COMBINED Mode Example*

Assume the input is type float and has a possible range of [0.0, 6.0] and the output type is quint8 ([0, 255]). The min_range and max_range values should be specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each value of the input by 255/6 and cast to quint8.

If the output type was qint8 ([-128, 127]), the operation will additionally subtract 128 from each value prior to casting, so that the range of values aligns with the range of qint8.

If the mode is MIN_FIRST, then this approach is used:

```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = number_of_steps / range
quantized = round(input * range_scale) - round(range_min * range_scale) +
  numeric_limits<T>::min()
quantized = max(quantized, numeric_limits<T>::min())
quantized = min(quantized, numeric_limits<T>::max())
```

The biggest difference between this and MIN_COMBINED is that the minimum range is rounded first, before it's subtracted from the rounded value. With MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing and dequantizing will introduce a larger and larger error.

One thing to watch out for is that the operator may choose to adjust the requested minimum and maximum values slightly during the quantization process, so you should always use the output ports as the range for further calculations. For example, if the requested minimum and maximum values are close to equal, they will be separated by a small epsilon value to prevent ill-formed quantized buffers from being created. Otherwise, you can end up with buffers where all the quantized values map to the same float value, which causes problems for operations that have to perform further calculations on them.

decodeJpeg Source

Arguments

:: Tensor v1 ByteString

contents: 0-D. The JPEG-encoded image.

-> Tensor Value Word8

image: 3-D with shape `[height, width, channels]`.

Decode a JPEG-encoded image to a uint8 tensor.

The attr channels indicates the desired number of color channels for the decoded image.

Accepted values are:

  • 0: Use the number of channels in the JPEG-encoded image.
  • 1: output a grayscale image.
  • 3: output an RGB image.

If needed, the JPEG-encoded image is transformed to match the requested number of color channels.

The attr ratio allows downscaling the image by an integer factor during decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than downscaling the image later.

pow Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Computes the power of one value to another.

Given a tensor x and a tensor y, this operation computes \(x^y\) for corresponding elements in x and y. For example:

```
# tensor x is [[2, 2], [3, 3]]
# tensor y is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
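The same example as a hedged Haskell sketch; the session plumbing and `vector` constructor are assumed from TensorFlow.Core and TensorFlow.Ops, not taken from this page:

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- x ^ y element-wise, on the flattened values from the example.
    z <- TF.runSession $
        TF.run (CoreOps.pow (Ops.vector [2, 2, 3, 3 :: Float])
                            (Ops.vector [8, 16, 2, 3 :: Float]))
    print (z :: V.Vector Float)  -- expected: [256.0, 65536.0, 9.0, 27.0]
```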

loopCond Source

Arguments

:: Tensor v1 Bool

input: A boolean scalar, representing the branch predicate of the Switch op.

-> Tensor Value Bool

output: The same tensor as input.

Forwards the input to the output.

This operator represents the loop termination condition used by the "pivot" switches of a loop.

readFile Source

Arguments

:: Tensor v1 ByteString

filename

-> Tensor Value ByteString

contents

Reads and outputs the entire contents of the input filename.
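A hedged sketch of wiring this op into a session; the `Scalar` fetch type and the file path are illustrative assumptions:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Data.ByteString (ByteString)
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- Read a whole file as a 0-D ByteString tensor (path is hypothetical).
    contents <- TF.runSession $
        TF.run (CoreOps.readFile (Ops.scalar ("/etc/hostname" :: ByteString)))
    print (contents :: TF.Scalar ByteString)
```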

imag Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float]` t, TensorType tout, OneOf `[Double, Float]` tout) 
=> Tensor v1 t

input

-> Tensor Value tout

output

Returns the imaginary part of a complex number.

Given a tensor input of complex numbers, this operation returns a tensor of type float that is the imaginary part of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where *a* is the real part and *b* is the imaginary part returned by this operation.

For example:

```
# tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```

tensorArrayGrad Source

Arguments

:: Tensor v1 ByteString

handle

-> Tensor v2 Float

flow_in

-> Build (Tensor Ref ByteString)

grad_handle

histogramSummary Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 ByteString

tag: Scalar. Tag to use for the Value.

-> Tensor v2 t

values: Any shape. Values to use to build the histogram.

-> Tensor Value ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with a histogram.

The generated `Summary` has one summary value containing a histogram for values.

This op reports an InvalidArgument error if any value is not finite.

conv3DBackpropInputV2 Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int32

input_sizes: An integer vector representing the tensor shape of input, where input is a 5-D `[batch, depth, rows, cols, in_channels]` tensor.

-> Tensor v2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Value t

output

Computes the gradients of 3-D convolution with respect to the input.

resizeBilinearGrad Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 Float

grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 t

original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, The image tensor that was resized.

-> Tensor Value t

output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double.

Computes the gradient of bilinear interpolation.

addManySparseToTensorsMap Source

Arguments

:: TensorType t 
=> Tensor v1 Int64

sparse_indices: 2-D. The indices of the minibatch SparseTensor. `sparse_indices[:, 0]` must be ordered values in `[0, N)`.

-> Tensor v2 t

sparse_values: 1-D. The values of the minibatch SparseTensor.

-> Tensor v3 Int64

sparse_shape: 1-D. The shape of the minibatch SparseTensor. The minibatch size `N == sparse_shape[0]`.

-> Build (Tensor Value Int64)

sparse_handles: 1-D. The handles of the SparseTensor now stored in the SparseTensorsMap. Shape: `[N]`.

Add an N-minibatch SparseTensor to a SparseTensorsMap, return N handles.

A SparseTensor of rank R is represented by three tensors: sparse_indices, sparse_values, and sparse_shape, where

```sparse_indices.shape[1] == sparse_shape.shape[0] == R```

An N-minibatch of SparseTensor objects is represented as a SparseTensor having a first sparse_indices column taking values between `[0, N)`, where the minibatch size `N == sparse_shape[0]`.

The input SparseTensor must have rank R greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the SparseTensor must be sorted in increasing order of this first dimension. The stored SparseTensor objects pointed to by each row of the output sparse_handles will have rank `R-1`.

The SparseTensor values can then be read out as part of a minibatch by passing the given keys as vector elements to TakeManySparseFromTensorsMap. To ensure the correct SparseTensorsMap is accessed, ensure that the same container and shared_name are passed to that Op. If no shared_name is provided here, instead use the *name* of the Operation created by calling AddManySparseToTensorsMap as the shared_name passed to TakeManySparseFromTensorsMap. Ensure the Operations are colocated.

batchIFFT Source

Arguments

:: Tensor v1 (Complex Float)

input

-> Tensor Value (Complex Float)

output

batchMatrixDeterminant Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input

-> Tensor Value t

output

deleteSessionTensor Source

Arguments

:: Tensor v1 ByteString

handle: The handle for a tensor stored in the session state.

-> ControlNode 

Delete the tensor specified by its handle in the session.

lookupTableSize Source

Arguments

:: Tensor Ref ByteString

table_handle: Handle to the table.

-> Build (Tensor Value Int64)

size: Scalar that contains number of elements in the table.

Computes the number of elements in the given table.

relu Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

features

-> Tensor Value t

activations

Computes rectified linear: `max(features, 0)`.
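A minimal sketch exercising the formula above (session helpers assumed from TensorFlow.Core and TensorFlow.Ops, as elsewhere on this page):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- max(features, 0) element-wise; negatives clamp to zero.
    acts <- TF.runSession $
        TF.run (CoreOps.relu (Ops.vector [-2, -0.5, 0, 3 :: Float]))
    print (acts :: V.Vector Float)  -- expected: [0.0, 0.0, 0.0, 3.0]
```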

dynamicStitch Source

Arguments

:: TensorType t 
=> [Tensor v1 Int32]

indices

-> [Tensor v2 t]

data

-> Tensor Value t

merged

Interleave the values from the `data` tensors into a single tensor.

Builds a merged tensor such that

```python
merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```

For example, if each `indices[m]` is scalar or vector, we have

```python
# Scalar indices:
merged[indices[m], ...] = data[m][...]

# Vector indices:
merged[indices[m][i], ...] = data[m][i, ...]
```

Each `data[i].shape` must start with the corresponding `indices[i].shape`, and the rest of `data[i].shape` must be constant w.r.t. i. That is, we must have `data[i].shape = indices[i].shape + constant`. In terms of this constant, the output shape is

merged.shape = [max(indices)] + constant

Values are merged in order, so if an index appears in both `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the merged result.

For example:

```python
indices[0] = 6
indices[1] = [4, 1]
indices[2] = [[5, 2], [0, 3]]
data[0] = [61, 62]
data[1] = [[41, 42], [11, 12]]
data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
          [51, 52], [61, 62]]
```

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" - style="width:100%" src="../../images/DynamicStitch.png" alt - /div

lookupTableFind Source

Arguments

:: (TensorType tin, TensorType tout) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v2 tin

keys: Any shape. Keys to look up.

-> Tensor v3 tout

default_value

-> Build (Tensor Value tout)

values: Same shape as keys. Values found in the table, or default_values for missing keys.

Looks up keys in a table, outputs the corresponding values.

The tensor keys must be of the same type as the keys of the table. The output values is of the type of the table values.

The scalar default_value is the value output for keys not present in the table. It must also be of the same type as the table values.

sampleDistortedBoundingBox Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word8]` t) 
=> Tensor v1 t

image_size: 1-D, containing `[height, width, channels]`.

-> Tensor v2 Float

bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image.

-> Build (Tensor Value t, Tensor Value t, Tensor Value Float)

(begin, size, bboxes)

  • begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
  • size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
  • bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`.

Generate a single randomly distorted bounding box for an image.

Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an image_size, bounding_boxes and a series of constraints.

The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: begin, size and bboxes. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like.

Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.

For example,

```python
# Generate a single distorted bounding box.
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
    tf.shape(image),
    bounding_boxes=bounding_boxes)

# Draw the bounding box in an image summary.
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                              bbox_for_draw)
tf.image_summary('images_with_box', image_with_box)

# Employ the bounding box to distort the image.
distorted_image = tf.slice(image, begin, size)
```

Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If use_image_if_no_bounding_boxes is false and no bounding boxes are supplied, an error is raised.

splitV Source

Arguments

:: (TensorType t, TensorType tlen, OneOf `[Int32, Int64]` tlen) 
=> Int64

num_split

-> Tensor v1 t

value: The tensor to split.

-> Tensor v2 tlen

size_splits: list containing the sizes of each output tensor along the split dimension. Must sum to the dimension of value along split_dim. Can contain one -1 indicating that dimension is to be inferred.

-> Tensor v3 Int32

split_dim: 0-D. The dimension along which to split. Must be in the range `[0, rank(value))`.

-> [Tensor Value t]

output: Tensors whose shape matches that of value except along split_dim, where their sizes are `size_splits[i]`.

Splits a tensor into num_split tensors along one dimension.

fusedPadConv2D Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, in_height, in_width, in_channels]`.

-> Tensor v2 Int32

paddings: A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of input.

-> Tensor v3 t

filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.

-> Tensor Value t

output

Performs a padding as a preprocess during a convolution.

Similar to FusedResizeAndPadConv2d, this op allows for an optimized implementation where the spatial padding transformation stage is fused with the im2col lookup, but in this case without the bilinear filtering required for resizing. Fusing the padding prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations.

The data_format attribute for Conv2D isn't supported by this op, and NHWC order is used instead.

Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage.

barrierInsertMany Source

Arguments

:: TensorType t 
=> Int64

component_index: The component of the barrier elements that is being assigned.

-> Tensor Ref ByteString

handle: The handle to a barrier.

-> Tensor v2 ByteString

keys: A one-dimensional tensor of keys, with length n.

-> Tensor v3 t

values: An any-dimensional tensor of values, which are associated with the respective keys. The 0th dimension must have length n.

-> Build ControlNode 

For each key, assigns the respective value to the specified component.

If a key is not found in the barrier, this operation will create a new incomplete element. If a key is found in the barrier, and the element already has a value at component_index, this operation will fail with INVALID_ARGUMENT, and leave the barrier in an undefined state.

abort :: ControlNode Source

Raises an exception to abort the process when called.

Returns nothing but an exception.

maxPoolWithArgmax Source

Arguments

:: (TensorType targmax, OneOf `[Int32, Int64]` targmax, TensorType t, OneOf `[Word16, Float]` t) 
=> Tensor v1 t

input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.

-> (Tensor Value t, Tensor Value targmax)

(output, argmax)

  • output: The max pooled output tensor.
  • argmax: 4-D. The flattened indices of the max values chosen for each output.

Performs max pooling on the input and outputs both max values and indices.

The indices in argmax are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index `((b * height + y) * width + x) * channels + c`.

refEnter Source

Arguments

:: TensorType t 
=> Tensor Ref t

data: The tensor to be made available to the child frame.

-> Build (Tensor Ref t)

output: The same tensor as `data`.

Creates or finds a child frame, and makes `data` available to the child frame.

The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations are run in parallel in the child frame.

dequantize Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Word16, Word8]` t) 
=> Tensor v1 t

input

-> Tensor v2 Float

min_range: The minimum scalar value possibly produced for the input.

-> Tensor v3 Float

max_range: The maximum scalar value possibly produced for the input.

-> Tensor Value Float

output

Dequantize the input tensor into a float Tensor.

min_range, max_range are scalar floats that specify the range for the input data. The mode attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.

In MIN_COMBINED mode, each value of the tensor will undergo the following:

```
if T == qint8, in[i] += (range(T) + 1) / 2.0
out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
```

here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

*MIN_COMBINED Mode Example*

If the input comes from a QuantizedRelu6, the output type is quint8 (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The min_range and max_range values are therefore 0.0 and 6.0. Dequantize on quint8 will take each value, cast to float, and multiply by 6 / 255. Note that if the quantized type is qint8, the operation will additionally add 128 to each value prior to casting.

If the mode is MIN_FIRST, then this approach is used:

```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = range / number_of_steps
const double offset_input = static_cast<double>(input) - lowest_quantized;
result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
```

drawBoundingBoxes Source

Arguments

:: (TensorType t, OneOf `[Word16, Float]` t) 
=> Tensor v1 t

images: 4-D with shape `[batch, height, width, depth]`. A batch of images.

-> Tensor v2 Float

boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes.

-> Tensor Value t

output: 4-D with the same shape as images. The batch of input images with bounding boxes drawn on the images.

Draw bounding boxes on a batch of images.

Outputs a copy of images but draws on top of the pixels zero or more bounding boxes specified by the locations in boxes. The coordinates of each bounding box in boxes are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.

For example, if an image is 100 x 200 pixels and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the bounding box will be `(10, 40)` to `(50, 180)`.

Parts of the bounding box may fall outside the image.

tensorArraySplit Source

Arguments

:: TensorType t 
=> Tensor Ref ByteString

handle

-> Tensor v2 t

value

-> Tensor v3 Int64

lengths

-> Tensor v4 Float

flow_in

-> Build (Tensor Value Float)

flow_out

stringToHashBucketFast Source

Arguments

:: Int64

num_buckets: The number of buckets.

-> Tensor v1 ByteString

input: The strings to assign a hash bucket.

-> Tensor Value Int64

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to its hash mod by a number of buckets.

The hash function is deterministic on the content of the string within the process and will never change. However, it is not suitable for cryptography. This function may be used when CPU time is scarce and inputs are trusted or unimportant. There is a risk of adversaries constructing inputs that all hash to the same bucket. To prevent this problem, use a strong hash function with `tf.string_to_hash_bucket_strong`.
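A short sketch, assuming the usual session helpers from TensorFlow.Core and TensorFlow.Ops; bucket values are process-deterministic but otherwise unspecified, so no concrete output is shown:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- 10 buckets; every output value lands in [0, 10).
    buckets <- TF.runSession $
        TF.run (CoreOps.stringToHashBucketFast 10
                    (Ops.vector ["hello", "world"]))
    print (buckets :: V.Vector Int64)
```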

tensorArrayScatter Source

Arguments

:: TensorType t 
=> Tensor Ref ByteString

handle

-> Tensor v2 Int32

indices

-> Tensor v3 t

value

-> Tensor v4 Float

flow_in

-> Build (Tensor Value Float)

flow_out

oneHot Source

Arguments

:: (TensorType t, TensorType tI, OneOf `[Int32, Int64, Word8]` tI) 
=> Tensor v1 tI

indices: A tensor of indices.

-> Tensor v2 Int32

depth: A scalar defining the depth of the one hot dimension.

-> Tensor v3 t

on_value: A scalar defining the value to fill in output when `indices[j] = i`.

-> Tensor v4 t

off_value: A scalar defining the value to fill in output when `indices[j] != i`.

-> Tensor Value t

output: The one-hot tensor.

Returns a one-hot tensor.

The locations represented by indices in indices take value on_value, while all other locations take value off_value.

If the input indices is rank N, the output will have rank `N+1`. The new axis is created at dimension axis (default: the new axis is appended at the end).

If indices is a scalar the output shape will be a vector of length depth.

If indices is a vector of length features, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```

If indices is a matrix (batch) with shape `[batch, features]`, the output shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```

Examples

Suppose that

```
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```

Then output is `[4 x 3]`:

```
output =
[5.0 0.0 0.0]  // one_hot(0)
[0.0 0.0 5.0]  // one_hot(2)
[0.0 0.0 0.0]  // one_hot(-1)
[0.0 5.0 0.0]  // one_hot(1)
```

Suppose that

```
indices = [0, 2, -1, 1]
depth = 3
on_value = 0.0
off_value = 3.0
axis = 0
```

Then output is `[3 x 4]`:

```
output =
[0.0 3.0 3.0 3.0]
[3.0 3.0 3.0 0.0]
[3.0 0.0 3.0 3.0]
// ^ one_hot(0)
// ^ one_hot(2)
// ^ one_hot(-1)
// ^ one_hot(1)
```

Suppose that

```
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```

Then output is `[2 x 2 x 3]`:

```
output =
[
  [1.0, 0.0, 0.0]  // one_hot(0)
  [0.0, 0.0, 1.0]  // one_hot(2)
][
  [0.0, 1.0, 0.0]  // one_hot(1)
  [0.0, 0.0, 0.0]  // one_hot(-1)
]
```
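The first example above, reproduced as a hedged Haskell sketch (session and constant helpers assumed from TensorFlow.Core and TensorFlow.Ops):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- depth 3, on_value 5.0, off_value 0.0 (default axis).
    oh <- TF.runSession $
        TF.run (CoreOps.oneHot (Ops.vector [0, 2, -1, 1 :: Int32])
                               (Ops.scalar (3 :: Int32))
                               (Ops.scalar (5 :: Float))
                               (Ops.scalar (0 :: Float)))
    print (oh :: V.Vector Float)  -- flattened [4 x 3] one-hot matrix
```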

batchIFFT3D Source

Arguments

:: Tensor v1 (Complex Float)

input

-> Tensor Value (Complex Float)

output

decodeRaw Source

Arguments

:: (TensorType out_type, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` out_type) 
=> Tensor v1 ByteString

bytes: All the elements must have the same length.

-> Tensor Value out_type

output: A Tensor with one more dimension than the input bytes. The added dimension will have size equal to the length of the elements of bytes divided by the number of bytes to represent out_type.

Reinterpret the bytes of a string as a vector of numbers.

tensorArrayPack Source

Arguments

:: TensorType dtype 
=> Tensor Ref ByteString

handle

-> Tensor v2 Float

flow_in

-> Build (Tensor Value dtype)

value

applyProximalAdagrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor v3 t

lr: Scaling factor. Must be a scalar.

-> Tensor v4 t

l1: L1 regularization. Must be a scalar.

-> Tensor v5 t

l2: L2 regularization. Must be a scalar.

-> Tensor v6 t

grad: The gradient.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

accum += grad * grad
prox_v = var - lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

sparseAccumulatorApplyGradient Source

Arguments

:: (TensorType dtype, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Bool

has_known_shape: Boolean indicating whether gradient_shape is unknown, in which case the input is ignored during validation.

-> Tensor Ref ByteString

handle: The handle to an accumulator.

-> Tensor v2 Int64

local_step: The local_step value at which the sparse gradient was computed.

-> Tensor v3 Int64

gradient_indices: Indices of the sparse gradient to be accumulated. Must be a vector.

-> Tensor v4 dtype

gradient_values: Values are the non-zero slices of the gradient, and must have the same first dimension as indices, i.e., the nnz represented by indices and values must be consistent.

-> Tensor v5 Int64

gradient_shape: Shape of the sparse gradient to be accumulated.

-> Build ControlNode 

Applies a sparse gradient to a given accumulator. Does not add if local_step is less than the accumulator's global_step.

add Source

Returns x + y element-wise.

  • *NOTE*: Add supports broadcasting. AddN does not. More about broadcasting here.

softsign Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

features

-> Tensor Value t

activations

Computes softsign: `features / (abs(features) + 1)`.

tensorArrayRead Source

Arguments

:: TensorType dtype 
=> Tensor Ref ByteString

handle

-> Tensor v2 Int32

index

-> Tensor v3 Float

flow_in

-> Build (Tensor Value dtype)

value

scatterNdSub Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref.

-> Build (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Applies sparse subtraction between updates and individual values or slices within a given variable according to indices.

ref is a Tensor with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into ref. It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of ref.

updates is a Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```

For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:

ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
sub = tf.scatter_nd_sub(ref, indices, updates)
with tf.Session() as sess:
  print sess.run(sub)

The resulting update to ref would look like this:

[1, -9, 3, -6, -4, 6, 7, -4]

See tf.scatter_nd for more details about how to make updates to slices.

restoreSlice Source

Arguments

:: TensorType dt 
=> Tensor v1 ByteString

file_pattern: Must have a single element. The pattern of the files from which we read the tensor.

-> Tensor v2 ByteString

tensor_name: Must have a single element. The name of the tensor to be restored.

-> Tensor v3 ByteString

shape_and_slice: Scalar. The shapes and slice specifications to use when restoring a tensor.

-> Tensor Value dt

tensor: The restored tensor.

Restores a tensor from checkpoint files.

This is like Restore except that restored tensor can be listed as filling only a slice of a larger tensor. shape_and_slice specifies the shape of the larger tensor and the slice that the restored tensor covers.

The shape_and_slice input has the same format as the elements of the shapes_and_slices input of the SaveSlices op.

assignAdd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v2 t

value: The value to be added to the variable.

-> Build (Tensor Ref t)

output_ref: Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated.

Update ref by adding value to it.

This operation outputs "ref" after the update is done. This makes it easier to chain operations that need to use the reset value.

greater Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value Bool

z

Returns the truth value of (x > y) element-wise.

  • *NOTE*: Greater supports broadcasting. More about broadcasting here.

readerNumWorkUnitsCompleted Source

Arguments

:: Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Build (Tensor Value Int64)

units_completed

Returns the number of work units this Reader has finished processing.

tensorArrayGatherV2 Source

Arguments

:: TensorType dtype 
=> Tensor v1 ByteString

handle: The handle to a TensorArray.

-> Tensor v2 Int32

indices: The locations in the TensorArray from which to read tensor elements.

-> Tensor v3 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> Tensor Value dtype

value: All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0).

Gather specific elements from the TensorArray into output value.

All elements selected by indices must have the same shape.

tensorArrayReadV2 Source

Arguments

:: TensorType dtype 
=> Tensor v1 ByteString

handle: The handle to a TensorArray.

-> Tensor v2 Int32

index

-> Tensor v3 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> Tensor Value dtype

value: The tensor that is read from the TensorArray.

Read an element from the TensorArray into output value.

decodeBase64 Source

Arguments

:: Tensor v1 ByteString

input: Base64 strings to decode.

-> Tensor Value ByteString

output: Decoded strings.

Decode web-safe base64-encoded strings.

Input may or may not have padding at the end. See EncodeBase64 for padding. Web-safe means that input must use `-` and `_` instead of `+` and `/`.
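A minimal sketch decoding two web-safe base64 strings (session plumbing assumed from TensorFlow.Core and TensorFlow.Ops, as elsewhere on this page):

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Data.ByteString (ByteString)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- "aGVsbG8" / "d29ybGQ" are unpadded base64 for "hello" / "world".
    decoded <- TF.runSession $
        TF.run (CoreOps.decodeBase64
                    (Ops.vector ["aGVsbG8", "d29ybGQ" :: ByteString]))
    print (decoded :: V.Vector ByteString)  -- expected: ["hello","world"]
```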

tensorArrayWriteV2 Source

Arguments

:: TensorType t 
=> Tensor v1 ByteString

handle: The handle to a TensorArray.

-> Tensor v2 Int32

index: The position to write to inside the TensorArray.

-> Tensor v3 t

value: The tensor to write to the TensorArray.

-> Tensor v4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> Tensor Value Float

flow_out: A float scalar that enforces proper chaining of operations.

Push an element onto the tensor_array.

audioSummary Source

Arguments

:: Float

sample_rate: The sample rate of the signal in hertz.

-> Tensor v1 ByteString

tag: Scalar. Used to build the tag attribute of the summary values.

-> Tensor v2 Float

tensor: 2-D of shape `[batch_size, frames]`.

-> Tensor Value ByteString

summary: Scalar. Serialized Summary protocol buffer.

Outputs a Summary protocol buffer with audio.

The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of sample_rate.

The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:

  • If max_outputs is 1, the summary value tag is '*tag*/audio'.
  • If max_outputs is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

isFinite Source

Arguments

:: (TensorType t, OneOf `[Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value Bool

y

Returns which elements of x are finite.

numpy compatibility: equivalent to `np.isfinite`.

tensorArrayConcat Source

Arguments

:: TensorType dtype 
=> Tensor Ref ByteString

handle

-> Tensor v2 Float

flow_in

-> Build (Tensor Value dtype, Tensor Value Int64)

(value, lengths)

  • value
  • lengths

sparseReduceSum Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int64

input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v2 t

input_values: 1-D. N non-empty values corresponding to input_indices.

-> Tensor v3 Int64

input_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v4 Int32

reduction_axes: 1-D. Length-K vector containing the reduction axes.

-> Tensor Value t

output: `R-K`-D. The reduced Tensor.

Computes the sum of elements across dimensions of a SparseTensor.

This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense Tensor instead of a sparse one.

Reduces sp_input along the dimensions given in reduction_axes. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_axes. If keep_dims is true, the reduced dimensions are retained with length 1.

If reduction_axes has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.

realDiv Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns x / y element-wise for real types.

If x and y are reals, this will return the floating-point division.

  • *NOTE*: Div supports broadcasting. More about broadcasting here.

biasAddV1 Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

value: Any number of dimensions.

-> Tensor v2 t

bias: 1-D with size the last dimension of value.

-> Tensor Value t

output: Broadcasted sum of value and bias.

Adds bias to value.

This is a deprecated version of BiasAdd and will soon be removed.

This is a special case of `tf.add` where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.

logicalOr Source

Arguments

:: Tensor v1 Bool

x

-> Tensor v2 Bool

y

-> Tensor Value Bool

z

Returns the truth value of x OR y element-wise.

  • *NOTE*: LogicalOr supports broadcasting. More about broadcasting here.

stackPush Source

Arguments

:: TensorType t 
=> Tensor Ref ByteString

handle: The handle to a stack.

-> Tensor v2 t

elem: The tensor to be pushed onto the stack.

-> Build (Tensor Value t)

output: The same tensor as the input elem.

Push an element onto the stack.

quantizedRelu Source

Arguments

:: (TensorType tinput, OneOf `[Int16, Int32, Word16, Word8]` tinput, TensorType out_type, OneOf `[Int16, Int32, Word16, Word8]` out_type) 
=> Tensor v1 tinput

features

-> Tensor v2 Float

min_features: The float value that the lowest quantized value represents.

-> Tensor v3 Float

max_features: The float value that the highest quantized value represents.

-> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)

(activations, min_activations, max_activations)

  • activations: Has the same output shape as "features".
  • min_activations: The float value that the lowest quantized value represents.
  • max_activations: The float value that the highest quantized value represents.

Computes Quantized Rectified Linear: `max(features, 0)`

broadcastGradientArgs Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 t

s0

-> Tensor v2 t

s1

-> (Tensor Value t, Tensor Value t)

(r0, r1)

  • r0
  • r1

Return the reduction indices for computing gradients of s0 op s1 with broadcast.

This is typically used by gradient computations for a broadcasting operation.

uniqueWithCounts Source

Arguments

:: (TensorType t, TensorType out_idx, OneOf `[Int32, Int64]` out_idx) 
=> Tensor v1 t

x: 1-D.

-> (Tensor Value t, Tensor Value out_idx, Tensor Value out_idx)

(y, idx, count)

  • y: 1-D.
  • idx: 1-D.
  • count: 1-D.

Finds unique elements in a 1-D tensor.

This operation returns a tensor y containing all of the unique elements of x sorted in the same order that they occur in x. This operation also returns a tensor idx the same size as x that contains the index of each value of x in the unique output y. Finally, it returns a third tensor count that contains the count of each element of y in x. In other words:

`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

For example:

```prettyprint
# tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx, count = unique_with_counts(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
```
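The prettyprint example above as a Haskell sketch; fetching the result triple assumes a Fetchable instance for 3-tuples of tensors, and the session helpers are assumed from TensorFlow.Core and TensorFlow.Ops:

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- uniqueWithCounts returns (y, idx, count) as three tensors.
    (y, idx, counts) <- TF.runSession $
        TF.run (CoreOps.uniqueWithCounts
                    (Ops.vector [1, 1, 2, 4, 4, 4, 7, 8, 8 :: Float]))
    print (y      :: V.Vector Float)  -- [1.0,2.0,4.0,7.0,8.0]
    print (idx    :: V.Vector Int32)  -- [0,0,1,2,2,2,3,4,4]
    print (counts :: V.Vector Int32)  -- [2,1,3,1,2]
```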

truncateMod Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Returns element-wise remainder of division. This emulates C semantics in that the result here is consistent with a truncating divide. E.g. `truncate(x / y) * y + truncate_mod(x, y) = x`.

  • *NOTE*: Mod supports broadcasting. More about broadcasting here.
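A small sketch contrasting the truncating semantics on a negative operand (helpers assumed from TensorFlow.Core and TensorFlow.Ops):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import qualified TensorFlow.Ops as Ops

main :: IO ()
main = do
    -- Truncating divide: -7 `truncate_mod` 5 is -2 (a flooring mod would give 3).
    r <- TF.runSession $
        TF.run (CoreOps.truncateMod (Ops.vector [7, -7 :: Float])
                                    (Ops.vector [5, 5 :: Float]))
    print (r :: V.Vector Float)  -- expected: [2.0, -2.0]
```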

stridedSliceGrad Source

Arguments

:: (TensorType t, TensorType index, OneOf `[Int32, Int64]` index) 
=> Tensor v1 index

shape

-> Tensor v2 index

begin

-> Tensor v3 index

end

-> Tensor v4 index

strides

-> Tensor v5 t

dy

-> Tensor Value t

output

Returns the gradient of StridedSlice.

Since StridedSlice cuts out pieces of its input which is size shape, its gradient will have the same shape (which is passed here as shape). The gradient will be zero in any element that the slice does not select.

Arguments are the same as StridedSliceGrad with the exception that dy is the input gradient to be propagated and shape is the shape of StridedSlice's input.

fractionalAvgPool Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64, Double, Float]` t) 
=> Tensor v1 t

value: 4-D with shape `[batch, height, width, channels]`.

-> (Tensor Value t, Tensor Value Int64, Tensor Value Int64)

(output, row_pooling_sequence, col_pooling_sequence)

  • output: output tensor after fractional avg pooling.
  • row_pooling_sequence: row pooling sequence, needed to calculate gradient.
  • col_pooling_sequence: column pooling sequence, needed to calculate gradient.

Performs fractional average pooling on the input.

Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region.

sparseAccumulatorTakeGradient Source

Arguments

:: (TensorType dtype, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` dtype) 
=> Tensor Ref ByteString

handle: The handle to a SparseConditionalAccumulator.

-> Tensor v2 Int32

num_required: Number of gradients required before we return an aggregate.

-> Build (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

(indices, values, shape)

  • indices: Indices of the average of the accumulated sparse gradients.
  • values: Values of the average of the accumulated sparse gradients.
  • shape: Shape of the average of the accumulated sparse gradients.

Extracts the average sparse gradient in the given SparseConditionalAccumulator, provided that sufficient (i.e., more than num_required) gradients have been accumulated. The op blocks until sufficient gradients have been accumulated. If the accumulator has already aggregated more than num_required gradients, it will return its average of the accumulated gradients. Also automatically increments the recorded global_step in the accumulator by 1, and resets the aggregate to 0.

decodeJSONExample Source

Arguments

:: Tensor v1 ByteString

json_examples: Each string is a JSON object serialized according to the JSON mapping of the Example proto.

-> Tensor Value ByteString

binary_examples: Each string is a binary Example protocol buffer corresponding to the respective element of json_examples.

Convert JSON-encoded Example records to binary protocol buffer strings.

This op translates a tensor containing Example records, encoded using the standard JSON mapping, into a tensor containing the same records encoded as binary protocol buffers. The resulting tensor can then be fed to any of the other Example-parsing ops.

placeholderWithDefault Source

Arguments

:: TensorType dtype 
=> Shape

shape: The (possibly partial) shape of the tensor.

-> Tensor v1 dtype

input: The default value to produce when output is not fed.

-> Tensor Value dtype

output: A placeholder tensor that defaults to input if it is not fed.

A placeholder op that passes through input when its output is not fed.

applyFtrl Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor Ref t

var: Should be from a Variable().

-> Tensor Ref t

accum: Should be from a Variable().

-> Tensor Ref t

linear: Should be from a Variable().

-> Tensor v4 t

grad: The gradient.

-> Tensor v5 t

lr: Scaling factor. Must be a scalar.

-> Tensor v6 t

l1: L1 regularization. Must be a scalar.

-> Tensor v7 t

l2: L2 regularization. Must be a scalar.

-> Tensor v8 t

lr_power: Scaling factor. Must be a scalar.

-> Build (Tensor Ref t)

out: Same as "var".

Update '*var' according to the Ftrl-proximal scheme.

accum_new = accum + grad * grad
linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new

sdcaShrinkL1 Source

Arguments

:: Float

l1: Symmetric l1 regularization strength.

-> Float

l2: Symmetric l2 regularization strength. Should be a positive float.

-> [Tensor Ref Float]

weights: a list of vectors where each value is the weight associated with a feature group.

-> Build ControlNode 

Applies L1 regularization shrink step on the parameters.

shardedFilename Source

Arguments

:: Tensor v1 ByteString

basename

-> Tensor v2 Int32

shard

-> Tensor v3 Int32

num_shards

-> Tensor Value ByteString

filename

Generate a sharded filename. The filename is printf formatted as %s-%05d-of-%05d, basename, shard, num_shards.

fakeQuantWithMinMaxArgs Source

Arguments

:: Tensor v1 Float

inputs

-> Tensor Value Float

outputs

Fake-quantize the inputs tensor, type float to outputs tensor of same type.

Attributes [min; max] define the clamping range for the inputs data. Op divides this range into 255 steps (total of 256 values), then replaces each inputs value with the closest of the quantized step values.

Quantization is called fake since the output is still in floating point.

scatterNdAdd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: A mutable Tensor. Should be from a Variable node.

-> Tensor v2 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v3 t

updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.

-> Build (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Applies sparse addition between updates and individual values or slices within a given variable according to indices.

ref is a Tensor with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into ref. It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of ref.

updates is a Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```

For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this:

ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.scatter_nd_add(ref, indices, updates)
with tf.Session() as sess:
  print sess.run(add)

The resulting update to ref would look like this:

[1, 13, 3, 14, 14, 6, 7, 20]

See tf.scatter_nd for more details about how to make updates to slices.

accumulatorNumAccumulated Source

Arguments

:: Tensor Ref ByteString

handle: The handle to an accumulator.

-> Build (Tensor Value Int32)

num_accumulated: The number of gradients aggregated in the given accumulator.

Returns the number of gradients aggregated in the given accumulator.

sparseSegmentSqrtN Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

data

-> Tensor v2 tidx

indices: A 1-D tensor. Has same rank as segment_ids.

-> Tensor v3 Int32

segment_ids: A 1-D tensor. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the sum along sparse segments of a tensor divided by the sqrt of N.

N is the size of the segment being reduced.

Read the section on Segmentation for an explanation of segments.

depthToSpace Source

Arguments

:: TensorType t 
=> Int64

block_size: The size of the spatial block, same as in Space2Depth.

-> Tensor v1 t

input

-> Tensor Value t

output

DepthToSpace for tensors of type T.

Rearranges data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. The attr block_size indicates the input block size and how the data is moved.

  • Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size`
  • The width of the output tensor is `input_width * block_size`, whereas the height is `input_height * block_size`.
  • The depth of the input tensor must be divisible by `block_size * block_size`.

That is, assuming the input is in the shape: `[batch, height, width, depth]`, the shape of the output will be: `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`

This operation requires that the input tensor be of rank 4, and that block_size be >= 1 and that `block_size * block_size` be a divisor of the input depth.

This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.

For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:

```prettyprint
x = [[[[1, 2, 3, 4]]]]
```

This operation will output a tensor of shape `[1, 2, 2, 1]`:

```prettyprint
[[[[1], [2]],
  [[3], [4]]]]
```

Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, the corresponding output will have 2x2 elements and will have a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output element shape is `[2, 2, 1]`.

For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

```prettyprint
x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```

This operation, for block size of 2, will return the following tensor of shape `[1, 2, 2, 3]`

```prettyprint
[[[[1, 2, 3], [4, 5, 6]],
  [[7, 8, 9], [10, 11, 12]]]]
```

Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

```prettyprint
x = [[[[1, 2, 3, 4],
       [5, 6, 7, 8]],
      [[9, 10, 11, 12],
       [13, 14, 15, 16]]]]
```

the operator will return the following tensor of shape `[1 4 4 1]`:

```prettyprint
x = [[ [1],  [2],  [5],  [6]],
     [ [3],  [4],  [7],  [8]],
     [ [9], [10], [13], [14]],
     [[11], [12], [15], [16]]]
```

allCandidateSampler Source

Arguments

:: Int64

num_sampled: Number of candidates to produce per batch.

-> Int64

num_true: Number of true labels per context.

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the - IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is - the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing - the number of times each candidate is expected to occur in a batch - of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled - candidate representing the number of times the candidate is expected - to occur in a batch of sampled candidates. If unique=true, then this is a - probability.

Generates labels for candidate sampling with a learned unigram distribution.

See explanations of candidate sampling and the data formats at - go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the - possibility of efficient dense matrix multiplication. The disadvantage is that - the sampled candidates must be chosen independently of the context and of the - true labels.

resizeNearestNeighborGrad Source

Arguments

:: (TensorType t, OneOf `[Int32, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

grads: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 Int32

size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The original input size.

-> Tensor Value t

output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image.

Computes the gradient of nearest neighbor interpolation.

cTCGreedyDecoder Source

Arguments

:: Tensor v1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v2 Int32

sequence_length: A vector containing sequence lengths, size `(batch_size)`.

-> (Tensor Value Int64, Tensor Value Int64, Tensor Value Int64, Tensor Value Float)

(decoded_indices, decoded_values, decoded_shape, log_probability)

  • decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, of a `SparseTensor2`. The rows store: [batch, time].
  • decoded_values: Values vector, size: `(total_decoded_outputs)`, of a `SparseTensor2`. The vector stores the decoded classes.
  • decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. Values are: `[batch_size, max_decoded_length]`.
  • log_probability: Matrix, size `(batch_size x 1)`, containing sequence log-probabilities.

Performs greedy decoding on the logits given in inputs.

A note about the attribute merge_repeated: if enabled, when consecutive logits' maximum indices are the same, only the first of these is emitted. Labeling the blank *, the sequence "A B B * B B" becomes "A B" if merge_repeated = True and "A B B B B" if merge_repeated = False.

Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank, index `(num_classes - 1)`, no new element is emitted.

l2Loss Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

t: Typically 2-D, but may have any dimensions.

-> Tensor Value t

output: 0-D.

L2 Loss.

Computes half the L2 norm of a tensor without the sqrt:

output = sum(t ** 2) / 2
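For concreteness, a minimal sketch of this formula using the TF 1.x Python API (the same style as the other examples in this document; `tf.nn.l2_loss` is assumed to wrap this op):

```python
import tensorflow as tf

t = tf.constant([3.0, 4.0])
loss = tf.nn.l2_loss(t)  # (3^2 + 4^2) / 2 = 12.5; note no sqrt is taken
with tf.Session() as sess:
    print(sess.run(loss))  # 12.5
```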

segmentMax Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 t

data

-> Tensor v2 tindices

segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s first dimension. Values should be sorted and can be repeated.

-> Tensor Value t

output: Has same shape as data, except for dimension 0 which has size k, the number of segments.

Computes the maximum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that \(output_i = \max_j(data_j)\) where max is over j such that `segment_ids[j] == i`.

[Figure: SegmentMax]
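An illustrative sketch (TF 1.x Python API, which is assumed to expose this op as `tf.segment_max`):

```python
import tensorflow as tf

data = tf.constant([[1, 2, 3], [4, 5, 6], [-1, -2, -3]])
segment_ids = tf.constant([0, 0, 1])  # sorted; rows 0-1 form segment 0, row 2 forms segment 1
with tf.Session() as sess:
    print(sess.run(tf.segment_max(data, segment_ids)))
    # [[ 4  5  6]
    #  [-1 -2 -3]]
```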

countUpTo Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` t) 
=> Int64

limit: If incrementing ref would bring it above limit, instead generates an OutOfRange error.

-> Tensor Ref t

ref: Should be from a scalar Variable node.

-> Build (Tensor Value t)

output: A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct.

Increments ref until it reaches limit.

tFRecordReader Source

Arguments

:: Build (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the records from a TensorFlow Records file.

switch Source

Arguments

:: TensorType t 
=> Tensor v1 t

data: The tensor to be forwarded to the appropriate output.

-> Tensor v2 Bool

pred: A scalar that specifies which output port will receive data.

-> (Tensor Value t, Tensor Value t)

(output_false, output_true)

  • output_false: If pred is false, data will be forwarded to this output.
  • output_true: If pred is true, data will be forwarded to this output.

Forwards `data` to the output port determined by pred.

If pred is true, the `data` input is forwarded to output_true. Otherwise, the data goes to output_false.

See also RefSwitch and Merge.

sparseSegmentMeanGrad Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

grad: gradient propagated to the SparseSegmentMean op.

-> Tensor v2 tidx

indices: indices passed to the corresponding SparseSegmentMean op.

-> Tensor v3 Int32

segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.

-> Tensor v4 Int32

output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.

-> Tensor Value t

output

Computes gradients for SparseSegmentMean.

Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.

gatherNd Source

Arguments

:: (TensorType tparams, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 tparams

params: `P-D`. The tensor from which to gather values.

-> Tensor v2 tindices

indices: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.

-> Tensor Value tparams

output: `(P+Q-K-1)-D`. Values from params gathered from indices given by indices.

Gather values or slices from params according to indices.

params is a Tensor of rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into params. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of params.

Produces an output tensor with shape

```
[d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]].
```

Some examples below.

Simple indexing into a matrix:

```python
indices = [[0, 0], [1, 1]]
params = [[a, b], [c, d]]
output = [a, d]
```

Slice indexing into a matrix:

```python
indices = [[1], [0]]
params = [[a, b], [c, d]]
output = [[c, d], [a, b]]
```

Indexing into a 3-tensor:

```python
indices = [[1]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[[a1, b1], [c1, d1]]]

indices = [[0, 1], [1, 0]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[c0, d0], [a1, b1]]

indices = [[0, 0, 1], [1, 0, 1]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [b0, b1]
```

Batched indexing into a matrix:

```python
indices = [[[0, 0]], [[0, 1]]]
params = [[a, b], [c, d]]
output = [[a], [b]]
```

Batched slice indexing into a matrix:

```python
indices = [[[1]], [[0]]]
params = [[a, b], [c, d]]
output = [[[c, d]], [[a, b]]]
```

Batched indexing into a 3-tensor:

```python
indices = [[[1]], [[0]]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[[[a1, b1], [c1, d1]]],
          [[[a0, b0], [c0, d0]]]]

indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[[c0, d0], [a1, b1]],
          [[a0, b0], [c1, d1]]]

indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[[a0, b0], [c0, d0]],
          [[a1, b1], [c1, d1]]]
output = [[b0, b1], [d0, c1]]
```

squeeze Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: The input to squeeze.

-> Tensor Value t

output: Contains the same data as input, but has one or more dimensions of size 1 removed.

Removes dimensions of size 1 from the shape of a tensor.

Given a tensor input, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying squeeze_dims.

For example:

```prettyprint
# t is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```

Or, to remove specific size 1 dimensions:

```prettyprint
# t is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```

randomUniform Source

Arguments

:: (TensorType dtype, OneOf `[Word16, Double, Float]` dtype, TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 t

shape: The shape of the output tensor.

-> Build (Tensor Value dtype)

output: A tensor of the specified shape filled with uniform random values.

Outputs random values from a uniform distribution.

The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.
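Since the op only produces values in `[0, 1)`, other ranges are obtained by scaling and shifting; a hedged sketch in the TF 1.x Python API:

```python
import tensorflow as tf

u = tf.random_uniform([2, 3])  # values in [0, 1)
samples = -5.0 + 10.0 * u      # rescaled to [-5, 5)
with tf.Session() as sess:
    print(sess.run(samples))
```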

readerReadUpTo Source

Arguments

:: Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Tensor Ref ByteString

queue_handle: Handle to a Queue, with string work items.

-> Tensor v3 Int64

num_records: number of records to read from Reader.

-> Build (Tensor Value ByteString, Tensor Value ByteString)

(keys, values)

  • keys: A 1-D tensor.
  • values: A 1-D tensor.

Returns up to num_records (key, value) pairs produced by a Reader.

Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch.

conv3DBackpropInput Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

input: Shape `[batch, depth, rows, cols, in_channels]`.

-> Tensor v2 t

filter: Shape `[depth, rows, cols, in_channels, out_channels]`. in_channels must match between input and filter.

-> Tensor v3 t

out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, out_channels]`.

-> Tensor Value t

output

Computes the gradients of 3-D convolution with respect to the input.

depthwiseConv2dNative Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input

-> Tensor v2 t

filter

-> Tensor Value t

output

Computes a 2-D depthwise convolution given 4-D input and filter tensors.

Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing in_channels convolutional filters of depth 1, depthwise_conv2d applies a different filter to each input channel (expanding from 1 channel to channel_multiplier channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels.

for k in 0..in_channels-1
  for q in 0..channel_multiplier-1
    output[b, i, j, k * channel_multiplier + q] =
      sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                   filter[di, dj, k, q]

Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
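A shape-level sketch of the channel expansion, using the TF 1.x Python wrapper `tf.nn.depthwise_conv2d` (assumed to correspond to this op):

```python
import tensorflow as tf

x = tf.random_normal([1, 8, 8, 3])  # [batch, in_height, in_width, in_channels]
f = tf.random_normal([3, 3, 3, 2])  # [fh, fw, in_channels, channel_multiplier]
y = tf.nn.depthwise_conv2d(x, f, strides=[1, 1, 1, 1], padding='SAME')
print(y.shape)  # (1, 8, 8, 6): in_channels * channel_multiplier = 3 * 2
```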

learnedUnigramCandidateSampler Source

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Value Int64, Tensor Value Float, Tensor Value Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

Generates labels for candidate sampling with a learned unigram distribution.

See explanations of candidate sampling and the data formats at go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

initializeTable Source

Arguments

:: (TensorType tkey, TensorType tval) 
=> Tensor Ref ByteString

table_handle: Handle to a table which will be initialized.

-> Tensor v2 tkey

keys: Keys of type Tkey.

-> Tensor v3 tval

values: Values of type Tval.

-> Build ControlNode 

Table initializer that takes two tensors for keys and values respectively.

merge Source

Arguments

:: TensorType t 
=> [Tensor v1 t]

inputs: The input tensors, exactly one of which will become available.

-> (Tensor Value t, Tensor Value Int32)

(output, value_index)

  • output: Will be set to the available input tensor.
  • value_index: The index of the chosen input tensor in inputs.

Forwards the value of an available tensor from inputs to output.

Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.

Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.

refMerge Source

Arguments

:: TensorType t 
=> [Tensor Ref t]

inputs: The input tensors, exactly one of which will become available.

-> Build (Tensor Ref t, Tensor Value Int32)

(output, value_index)

  • output: Will be set to the available input tensor.
  • value_index: The index of the chosen input tensor in inputs.

Forwards the value of an available tensor from inputs to output.

Merge waits for at least one of the tensors in inputs to become available. It is usually combined with Switch to implement branching.

Merge forwards the first tensor to become available to output, and sets value_index to its index in inputs.

round Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor Value t

y

Rounds the values of a tensor to the nearest integer, element-wise.

Rounds half to even. Also known as banker's rounding. If you want to round according to the current system rounding mode, use std::rint.
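A small sketch of the half-to-even behaviour (TF 1.x Python API):

```python
import tensorflow as tf

x = tf.constant([0.5, 1.5, 2.5, -0.5, 2.3])
with tf.Session() as sess:
    print(sess.run(tf.round(x)))  # [ 0.  2.  2. -0.  2.]: ties go to the even neighbour
```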

batchSelfAdjointEig Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t) 
=> Tensor v1 t

input

-> Tensor Value t

output

dynamicPartition Source

Arguments

:: TensorType t 
=> Int64

num_partitions: The number of partitions to output.

-> Tensor v1 t

data

-> Tensor v2 Int32

partitions: Any shape. Indices in the range `[0, num_partitions)`.

-> [Tensor Value t]

outputs

Partitions `data` into num_partitions tensors using indices from partitions.

For each index tuple js of size `partitions.ndim`, the slice `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` are placed in `outputs[i]` in lexicographic order of js, and the first dimension of `outputs[i]` is the number of entries in partitions equal to i. In detail,

```python
outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]

outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
```

`data.shape` must start with `partitions.shape`.

For example:

```python
# Scalar partitions.
partitions = 1
num_partitions = 2
data = [10, 20]
outputs[0] = []  # Empty with shape [0, 2]
outputs[1] = [[10, 20]]

# Vector partitions.
partitions = [0, 0, 1, 1, 0]
num_partitions = 2
data = [10, 20, 30, 40, 50]
outputs[0] = [10, 20, 50]
outputs[1] = [30, 40]
```

[Figure: DynamicPartition]

reshape Source

Arguments

:: (TensorType t, TensorType tshape, OneOf `[Int32, Int64]` tshape) 
=> Tensor v1 t

tensor

-> Tensor v2 tshape

shape: Defines the shape of the output tensor.

-> Tensor Value t

output

Reshapes a tensor.

Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.

If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of `[-1]` flattens into 1-D. At most one component of shape can be -1.

If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements implied by shape must be the same as the number of elements in tensor.

For example:

```prettyprint
# tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor t has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor t is [[[1, 1], [2, 2]],
#              [[3, 3], [4, 4]]]
# tensor t has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor t is [[[1, 1, 1],
#               [2, 2, 2]],
#              [[3, 3, 3],
#               [4, 4, 4]],
#              [[5, 5, 5],
#               [6, 6, 6]]]
# tensor t has shape [3, 2, 3]
# pass '[-1]' to flatten t
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape

# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [2, -1, 3]) ==> [[[1, 1, 1],
                             [2, 2, 2],
                             [3, 3, 3]],
                            [[4, 4, 4],
                             [5, 5, 5],
                             [6, 6, 6]]]

# tensor t is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```

fixedLengthRecordReader Source

Arguments

:: Int64

record_bytes

-> Build (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs fixed-length records from a file.

sdcaOptimizer Source

Arguments

:: Float

l1: Symmetric l1 regularization strength.

-> Float

l2: Symmetric l2 regularization strength.

-> Int64

num_inner_iterations: Number of iterations per mini-batch.

-> Int64

num_loss_partitions: Number of partitions of the global loss function.

-> [Tensor v1 Int64]

sparse_example_indices: a list of vectors which contain example indices.

-> [Tensor v2 Int64]

sparse_feature_indices: a list of vectors which contain feature indices.

-> [Tensor v3 Float]

sparse_feature_values: a list of vectors which contain the feature values associated with each feature group.

-> [Tensor v4 Float]

dense_features: a list of matrices which contains the dense feature values.

-> Tensor v5 Float

example_weights: a vector which contains the weight associated with each example.

-> Tensor v6 Float

example_labels: a vector which contains the label/target associated with each example.

-> [Tensor v7 Int64]

sparse_indices: a list of vectors where each value is the indices which has corresponding weights in sparse_weights. This field may be omitted for the dense approach.

-> [Tensor v8 Float]

sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group.

-> [Tensor v9 Float]

dense_weights: a list of vectors where the values are the weights associated with a dense feature group.

-> Tensor v10 Float

example_state_data: a list of vectors containing the example state data.

-> (Tensor Value Float, [Tensor Value Float], [Tensor Value Float])

(out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights)

  • out_example_state_data: a list of vectors containing the updated example state data.
  • out_delta_sparse_weights: a list of vectors where each value is the delta weights associated with a sparse feature group.
  • out_delta_dense_weights: a list of vectors where the values are the delta weights associated with a dense feature group.

Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for

linear models with L1 + L2 regularization. As the global optimization objective is strongly-convex, the optimizer optimizes the dual objective at each step. The optimizer applies each update one example at a time. Examples are sampled uniformly, and the optimizer is learning-rate free and enjoys a linear convergence rate.

Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; Zhang, Tong. 2012. arXiv:1211.2717: http://arxiv.org/pdf/1211.2717v1.pdf

Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|

Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, Martin Takac. http://arxiv.org/abs/1502.03508

Stochastic Dual Coordinate Ascent with Adaptive Probabilities. Dominik Csiba, Zheng Qu, Peter Richtarik. https://arxiv.org/abs/1502.08053

resizeArea Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 t

images: 4-D with shape `[batch, height, width, channels]`.

-> Tensor v2 Int32

size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.

-> Tensor Value Float

resized_images: 4-D with shape `[batch, new_height, new_width, channels]`.

Resize images to size using area interpolation.

Input images can be of different types but output images are always float.

linSpace Source

Arguments

:: (TensorType t, OneOf `[Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

start: First entry in the range.

-> Tensor v2 t

stop: Last entry in the range.

-> Tensor v3 tidx

num: Number of values to generate.

-> Tensor Value t

output: 1-D. The generated values.

Generates values in an interval.

A sequence of num evenly-spaced values are generated beginning at start. If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`, so that the last one is exactly stop.

For example:

```
tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
```

cTCLoss Source

Arguments

:: Tensor v1 Float

inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.

-> Tensor v2 Int64

labels_indices: The indices of a `SparseTensor2`. `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`.

-> Tensor v3 Int32

labels_values: The values (labels) associated with the given batch and time.

-> Tensor v4 Int32

sequence_length: A vector containing sequence lengths (batch).

-> (Tensor Value Float, Tensor Value Float)

(loss, gradient)

  • loss: A vector (batch) containing log-probabilities.
  • gradient: The gradient of loss. 3-D, shape: `(max_time x batch_size x num_classes)`.

Calculates the CTC Loss (log probability) for each batch entry. Also calculates

the gradient. This class performs the softmax operation for you, so inputs should be e.g. linear projections of outputs by an LSTM.

matrixDiagPart Source

Arguments

:: TensorType t 
=> Tensor v1 t

input: Rank k tensor where `k >= 2`.

-> Tensor Value t

diagonal: The extracted diagonal(s) having shape `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.

Returns the batched diagonal part of a batched tensor.

This operation returns a tensor with the diagonal part of the batched input. The diagonal part is computed as follows:

Assume input has k dimensions `[I, J, K, ..., M, N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:

`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.

The input must be at least a matrix.

For example:

```prettyprint
# input is [[[1, 0, 0, 0]
             [0, 2, 0, 0]
             [0, 0, 3, 0]
             [0, 0, 0, 4]],
            [[5, 0, 0, 0]
             [0, 6, 0, 0]
             [0, 0, 7, 0]
             [0, 0, 0, 8]]]

and input.shape = (2, 4, 4)

tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]

which has shape (2, 4)
```

enter Source

Arguments

:: TensorType t 
=> Tensor v1 t

data: The tensor to be made available to the child frame.

-> Tensor Value t

output: The same tensor as `data`.

Creates or finds a child frame, and makes `data` available to the child frame.

This op is used together with Exit to create loops in the graph. The unique frame_name is used by the Executor to identify frames. If is_constant is true, output is a constant in the child frame; otherwise it may be changed in the child frame. At most parallel_iterations iterations are run in parallel in the child frame.

encodePng Source

Arguments

:: (TensorType t, OneOf `[Word16, Word8]` t) 
=> Tensor v1 t

image: 3-D with shape `[height, width, channels]`.

-> Tensor Value ByteString

contents: 0-D. PNG-encoded image.

PNG-encode an image.

image is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where channels is:

  • 1: for grayscale.
  • 2: for grayscale + alpha.
  • 3: for RGB.
  • 4: for RGBA.

The ZLIB compression level, compression, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower.

exit Source

Arguments

:: TensorType t 
=> Tensor v1 t

data: The tensor to be made available to the parent frame.

-> Tensor Value t

output: The same tensor as `data`.

Exits the current frame to its parent frame.

Exit makes its input `data` available to the parent frame.

scatterNd Source

Arguments

:: (TensorType t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor v1 tindices

indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.

-> Tensor v2 t

updates: A Tensor. Must have the same type as tensor. A tensor of updated values to store in ref.

-> Tensor v3 tindices

shape: A vector. The shape of the resulting tensor.

-> Tensor Value t

output: A new tensor with the given shape and updates applied according to the indices.

Creates a new tensor by applying sparse updates to individual

values or slices within a zero tensor of the given shape tensor according to indices. This operator is the inverse of the tf.gather_nd operator which extracts values or slices from a given tensor.

TODO(simister): Add a link to Variable.getitem documentation on slice syntax.

shape is a TensorShape with rank P and indices is a Tensor of rank Q.

indices must be an integer tensor, containing indices into shape. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

The innermost dimension of indices (with length K) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the Kth dimension of shape.

updates is a Tensor of rank `Q-1+P-K` with shape:

```
[d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
```

The simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.

[Figure: ScatterNd1]

In Python, this scatter operation would look like this:

indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
shape = tf.constant([8])
scatter = tf.scatter_nd(indices, updates, shape)
with tf.Session() as sess:
  print sess.run(scatter)

The resulting tensor would look like this:

[0, 11, 0, 10, 9, 0, 0, 12]

We can also insert entire slices of a higher rank tensor all at once. For example, if we wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new values.

[Figure: ScatterNd2]

In Python, this scatter operation would look like this:

indices = tf.constant([[0], [2]])
updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                        [7, 7, 7, 7], [8, 8, 8, 8]],
                       [[5, 5, 5, 5], [6, 6, 6, 6],
                        [7, 7, 7, 7], [8, 8, 8, 8]]])
shape = tf.constant([4, 4, 4])
scatter = tf.scatter_nd(indices, updates, shape)
with tf.Session() as sess:
  print sess.run(scatter)

The resulting tensor would look like this:

[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
 [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]

priorityQueue Source

Arguments

:: Build (Tensor Ref ByteString)

handle: The handle to the queue.

A queue that produces elements sorted by the first component value.

Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra entry in their input (resp. output) lists.

refSwitch Source

Arguments

:: TensorType t 
=> Tensor Ref t

data: The ref tensor to be forwarded to the appropriate output.

-> Tensor v2 Bool

pred: A scalar that specifies which output port will receive data.

-> Build (Tensor Ref t, Tensor Ref t)

(output_false, output_true)

  • output_false: If pred is false, data will be forwarded to this output.
  • output_true: If pred is true, data will be forwarded to this output.

Forwards the ref tensor `data` to the output port determined by pred.

If pred is true, the `data` input is forwarded to output_true. Otherwise, the data goes to output_false.

See also Switch and Merge.

nextIteration Source

Arguments

:: TensorType t 
=> Tensor v1 t

data: The tensor to be made available to the next iteration.

-> Tensor Value t

output: The same tensor as `data`.

Makes its input available to the next iteration.

refNextIteration Source

Arguments

:: TensorType t 
=> Tensor Ref t

data: The tensor to be made available to the next iteration.

-> Build (Tensor Ref t)

output: The same tensor as `data`.

Makes its input available to the next iteration.

batchMatMul Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int32, Word16, Double, Float]` t) 
=> Tensor v1 t

x: 3-D or higher with shape `[..., r_x, c_x]`.

-> Tensor v2 t

y: 3-D or higher with shape `[..., r_y, c_y]`.

-> Tensor Value t

output: 3-D or higher with shape `[..., r_o, c_o]`

Multiplies slices of two tensors in batches.

Multiplies all slices of Tensor x and y (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the adj_x or adj_y flag to True, which are by default False.

The input tensors x and y are 3-D or higher with shape `[..., r_x, c_x]` and `[..., r_y, c_y]`.

The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:

r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y

It is computed as:

output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
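A shape-level sketch (TF 1.x Python API, where `tf.matmul` is assumed to dispatch to this op for inputs of rank 3 or higher):

```python
import tensorflow as tf

x = tf.random_normal([2, 3, 4])      # batch of two 3x4 matrices
y = tf.random_normal([2, 4, 5])      # batch of two 4x5 matrices
z = tf.matmul(x, y)                  # batched matmul; shape (2, 3, 5)
w = tf.matmul(x, x, adjoint_b=True)  # adjoint the second operand: (2, 3, 3)
print(z.shape, w.shape)
```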

refSelect Source

Arguments

:: TensorType t 
=> Tensor v1 Int32

index: A scalar that determines the input that gets selected.

-> [Tensor Ref t]

inputs: A list of ref tensors, one of which will be forwarded to output.

-> Build (Tensor Ref t)

output: The forwarded tensor.

Forwards the indexth element of inputs to output.

mean Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tidx, OneOf `[Int32, Int64]` tidx) 
=> Tensor v1 t

input: The tensor to reduce.

-> Tensor v2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Value t

output: The reduced tensor.

Computes the mean of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
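A small sketch of the reduction semantics and the keep_dims attribute (TF 1.x Python API, where this op backs `tf.reduce_mean`):

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(x, reduction_indices=[0])))  # [2. 3.]
    print(sess.run(tf.reduce_mean(x, reduction_indices=[1], keep_dims=True)))
    # [[1.5]
    #  [3.5]]
```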

scatterAdd Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, TensorType tindices, OneOf `[Int32, Int64]` tindices) 
=> Tensor Ref t

ref: Should be from a Variable node.

-> Tensor v2 tindices

indices: A tensor of indices into the first dimension of ref.

-> Tensor v3 t

updates: A tensor of updated values to add to ref.

-> Build (Tensor Ref t)

output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.

Adds sparse updates to a variable reference.

This operation computes

# Scalar indices
ref[indices, ...] += updates[...]

# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]

This operation outputs ref after the update is done. This makes it easier to chain operations that need to use the reset value.

Duplicate entries are handled correctly: if multiple indices reference the same location, their contributions add.

Requires `updates.shape = indices.shape + ref.shape[1:]`.

[Figure: ScatterAdd]
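A minimal sketch of how duplicate indices accumulate (TF 1.x Python API, assumed `tf.scatter_add`):

```python
import tensorflow as tf

ref = tf.Variable([1, 2, 3, 4])
updated = tf.scatter_add(ref, indices=[0, 0, 2], updates=[10, 10, 5])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(updated))  # [21  2  8  4]: both updates to index 0 add
```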

randomCrop Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word8, Double, Float]` t) 
=> Tensor v1 t

image: 3-D of shape `[height, width, channels]`.

-> Tensor v2 Int64

size: 1-D of length 2 containing: crop_height, crop_width.

-> Build (Tensor Value t)

output: 3-D of shape `[crop_height, crop_width, channels]`.

Randomly crop image.

size is a 1-D int64 tensor with 2 elements representing the crop height and width. The values must be non-negative.

This Op picks a random location in image and crops a height by width rectangle from that location. The random location is picked so the cropped area will fit inside the original image.

refExit Source

Arguments

:: TensorType t 
=> Tensor Ref t

data: The tensor to be made available to the parent frame.

-> Build (Tensor Ref t)

output: The same tensor as `data`.

Exits the current frame to its parent frame.

Exit makes its input `data` available to the parent frame.

readerSerializeState Source

Arguments

:: Tensor Ref ByteString

reader_handle: Handle to a Reader.

-> Build (Tensor Value ByteString)

state

Produce a string tensor that encodes the state of a Reader.

Not all Readers support being serialized, so this can produce an Unimplemented error.

tanhGrad Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t) 
=> Tensor v1 t

x

-> Tensor v2 t

y

-> Tensor Value t

z

Computes the gradient for the tanh of x wrt its input.

Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and dy is the corresponding input gradient.
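This is the identity used by the autodiff machinery; a quick numeric check via `tf.gradients` (TF 1.x Python API):

```python
import tensorflow as tf

x = tf.constant([0.0, 1.0])
y = tf.tanh(x)
(dx,) = tf.gradients(y, x)  # elementwise 1 - tanh(x)^2
with tf.Session() as sess:
    print(sess.run(dx))  # [1.0, ~0.42]
```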

sparseSparseMaximum Source

Arguments

:: (TensorType t, OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int64

a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, in the canonical lexicographic ordering.

-> Tensor v2 t

a_values: 1-D. N non-empty values corresponding to a_indices.

-> Tensor v3 Int64

a_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v4 Int64

b_indices: counterpart to a_indices for the other operand.

-> Tensor v5 t

b_values: counterpart to a_values for the other operand; must be of the same dtype.

-> Tensor v6 Int64

b_shape: counterpart to a_shape for the other operand; the two shapes must be equal.

-> (Tensor Value Int64, Tensor Value t)

(output_indices, output_values)

  • output_indices: 2-D. The indices of the output SparseTensor.
  • output_values: 1-D. The values of the output SparseTensor.

Returns the element-wise max of two SparseTensors.

Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
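An illustrative sketch (TF 1.x Python API, where `tf.sparse_maximum` is assumed to wrap this op); note that implicit zeros take part in the maximum:

```python
import tensorflow as tf

a = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1, 5], dense_shape=[2, 2])
b = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[3, 2], dense_shape=[2, 2])
m = tf.sparse_maximum(a, b)  # dense view: max([[1, 0], [0, 5]], [[3, 2], [0, 0]])
with tf.Session() as sess:
    print(sess.run(m))  # indices (0,0), (0,1), (1,1) with values 3, 2, 5
```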

decodeGif Source

Arguments

:: Tensor v1 ByteString

contents: 0-D. The GIF-encoded image.

-> Tensor Value Word8

image: 4-D with shape `[num_frames, height, width, 3]`. RGB order

Decode the first frame of a GIF-encoded image to a uint8 tensor.

GIFs with frame or transparency compression are not supported; convert animated GIFs from compressed to uncompressed by:

convert $src.gif -coalesce $dst.gif

substr Source

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` t) 
=> Tensor v1 ByteString

input: Tensor of strings

-> Tensor v2 t

pos: Scalar defining the position of first character in each substring

-> Tensor v3 t

len: Scalar defining the number of characters to include in each substring

-> Tensor Value ByteString

output: Tensor of substrings

Return substrings from Tensor of strings.

For each string in the input Tensor, creates a substring starting at index pos with a total length of len.

stringToHashBucketStrong'

Arguments

:: OpParams 
-> Int64

num_buckets: The number of buckets.

-> Tensor v'1 ByteString

input: The strings to assign a hash bucket.

-> Tensor Build Int64

output: A Tensor of the same shape as the input string_tensor.

stringToNumber

Arguments

:: OneOf `[Int32, Float]` out_type 
=> Tensor v'1 ByteString

string_tensor

-> Tensor Build out_type

output: A Tensor of the same shape as the input string_tensor.

Converts each string in the input Tensor to the specified numeric type.

(Note that int32 overflow results in an error while float overflow results in a rounded value.)
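A minimal sketch (TF 1.x Python API, assumed `tf.string_to_number`):

```python
import tensorflow as tf

s = tf.constant(['3', '-1', '7'])
n = tf.string_to_number(s, out_type=tf.int32)
with tf.Session() as sess:
    print(sess.run(n))  # [ 3 -1  7]
```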

stringToNumber'

Arguments

:: OneOf `[Int32, Float]` out_type 
=> OpParams 
-> Tensor v'1 ByteString

string_tensor

-> Tensor Build out_type

output: A Tensor of the same shape as the input string_tensor.

sub

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns x - y element-wise.

*NOTE*: Sub supports broadcasting. More about broadcasting here.

sub'

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

substr

Arguments

:: OneOf `[Int32, Int64]` t 
=> Tensor v'1 ByteString

input: Tensor of strings

-> Tensor v'2 t

pos: Scalar defining the position of first character in each substring

-> Tensor v'3 t

len: Scalar defining the number of characters to include in each substring

-> Tensor Build ByteString

output: Tensor of substrings

Return substrings from Tensor of strings.

For each string in the input Tensor, creates a substring starting at index pos with a total length of len.

If len defines a substring that would extend beyond the length of the input string, then as many characters as possible are used.

If pos is negative or specifies a character index larger than any of the input strings, then an InvalidArgumentError is thrown.

pos and len must have the same shape, otherwise a ValueError is thrown on Op creation.

```
input = b'thirteen'
position = [1, 5, 7]
length = [3, 2, 1]

output = [b'hir', b'ee', b'n']
```

lookupTableInsert Source

Arguments

:: (TensorType tin, TensorType tout) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v2 tin

keys: Any shape. Keys to look up.

-> Tensor v3 tout

values: Values to associate with keys.

-> Build ControlNode 

Updates the table to associate keys with values.

The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.

sparseDenseCwiseDiv Source

Arguments

:: (TensorType t, OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t) 
=> Tensor v1 Int64

sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a SparseTensor, possibly not in canonical ordering.

-> Tensor v2 t

sp_values: 1-D. N non-empty values corresponding to sp_indices.

-> Tensor v3 Int64

sp_shape: 1-D. Shape of the input SparseTensor.

-> Tensor v4 t

dense: R-D. The dense Tensor operand.

-> Tensor Value t

output: 1-D. The N values that are operated on.

Component-wise divides a SparseTensor by a dense Tensor.

*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.

lookupTableImport Source

Arguments

:: (TensorType tin, TensorType tout) 
=> Tensor Ref ByteString

table_handle: Handle to the table.

-> Tensor v2 tin

keys: Any shape. Keys to look up.

-> Tensor v3 tout

values: Values to associate with keys.

-> Build ControlNode 

Replaces the contents of the table with the specified keys and values.

The tensor keys must be of the same type as the keys of the table. The tensor values must be of the type of the table values.


substr'

Arguments

:: OneOf `[Int32, Int64]` t 
=> OpParams 
-> Tensor v'1 ByteString

input: Tensor of strings

-> Tensor v'2 t

pos: Scalar defining the position of first character in each substring

-> Tensor v'3 t

len: Scalar defining the number of characters to include in each substring

-> Tensor Build ByteString

output: Tensor of substrings

sum

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

Computes the sum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

sum'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tidx) 
=> OpParams 
-> Tensor v'1 t

input: The tensor to reduce.

-> Tensor v'2 tidx

reduction_indices: The dimensions to reduce.

-> Tensor Build t

output: The reduced tensor.

svd

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> Tensor v'1 t

input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Let P be the minimum of M and N.

-> (Tensor Build t, Tensor Build t, Tensor Build t)

(s, u, v)

  • s: Singular values. Shape is `[..., P]`.
  • u: Left singular vectors. If full_matrices is False then shape is `[..., M, P]`; if full_matrices is True then shape is `[..., M, M]`. Undefined if compute_uv is False.
  • v: Right singular vectors. If full_matrices is False then shape is `[..., N, P]`. If full_matrices is True then shape is `[..., N, N]`. Undefined if compute_uv is false.

Computes the singular value decompositions of one or more matrices.

Computes the SVD of each inner matrix in input such that `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

```prettyprint
# a is a tensor containing a batch of matrices.
# s is a tensor of singular values for each matrix.
# u is the tensor containing the left singular vectors for each matrix.
# v is the tensor containing the right singular vectors for each matrix.
s, u, v = svd(a)
s, _, _ = svd(a, compute_uv=False)
```

svd'

Arguments

:: OneOf `[Complex Double, Complex Float, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions form matrices of size `[M, N]`. Let P be the minimum of M and N.

-> (Tensor Build t, Tensor Build t, Tensor Build t)

(s, u, v)

  • s: Singular values. Shape is `[..., P]`.
  • u: Left singular vectors. If full_matrices is False then shape is `[..., M, P]`; if full_matrices is True then shape is `[..., M, M]`. Undefined if compute_uv is False.
  • v: Right singular vectors. If full_matrices is False then shape is `[..., N, P]`. If full_matrices is True then shape is `[..., N, N]`. Undefined if compute_uv is false.

switch

Arguments

:: TensorType t 
=> Tensor v'1 t

data: The tensor to be forwarded to the appropriate output.

-> Tensor v'2 Bool

pred: A scalar that specifies which output port will receive data.

-> (Tensor Build t, Tensor Build t)

(output_false, output_true)

  • output_false: If pred is false, data will be forwarded to this output.
  • output_true: If pred is true, data will be forwarded to this output.

Forwards `data` to the output port determined by pred.

If pred is true, the `data` input is forwarded to output_true. Otherwise, the data goes to output_false.

See also RefSwitch and Merge.

switch'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

data: The tensor to be forwarded to the appropriate output.

-> Tensor v'2 Bool

pred: A scalar that specifies which output port will receive data.

-> (Tensor Build t, Tensor Build t)

(output_false, output_true)

  • output_false: If pred is false, data will be forwarded to this output.
  • output_true: If pred is true, data will be forwarded to this output.

tFRecordReader

Arguments

:: MonadBuild m' 
=> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the records from a TensorFlow Records file.

tFRecordReader'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

tFRecordReaderV2

Arguments

:: MonadBuild m' 
=> m' ResourceHandle

reader_handle: The handle to reference the Reader.

A Reader that outputs the records from a TensorFlow Records file.

tFRecordReaderV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' ResourceHandle

reader_handle: The handle to reference the Reader.

takeManySparseFromTensorsMap

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Tensor v'1 Int64

sparse_handles: 1-D, The N serialized SparseTensor objects. Shape: `[N]`.

-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

(sparse_indices, sparse_values, sparse_shape)

  • sparse_indices: 2-D. The indices of the minibatch SparseTensor.
  • sparse_values: 1-D. The values of the minibatch SparseTensor.
  • sparse_shape: 1-D. The shape of the minibatch SparseTensor.

Read SparseTensors from a SparseTensorsMap and concatenate them.

The input sparse_handles must be an int64 matrix of shape `[N, 1]` where N is the minibatch size and the rows correspond to the output handles of AddSparseToTensorsMap or AddManySparseToTensorsMap. The ranks of the original SparseTensor objects that went into the given input ops must all match. When the final SparseTensor is created, it has rank one higher than the ranks of the incoming SparseTensor objects (they have been concatenated along a new row dimension on the left).

The output SparseTensor object's shape values for all dimensions but the first are the max across the input SparseTensor objects' shape values for the corresponding dimensions. Its first shape value is N, the minibatch size.

The input SparseTensor objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run SparseReorder to restore index ordering.

For example, if the handles represent an input, which is a `[2, 3]` matrix representing two original SparseTensor objects:

```
index = [ 0]
        [10]
        [20]
values = [1, 2, 3]
shape = [50]
```

and

```
index = [ 2]
        [10]
values = [4, 5]
shape = [30]
```

then the final SparseTensor will be:

```
index = [0  0]
        [0 10]
        [0 20]
        [1  2]
        [1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
```

takeManySparseFromTensorsMap'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Tensor v'1 Int64

sparse_handles: 1-D, The N serialized SparseTensor objects. Shape: `[N]`.

-> m' (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)

(sparse_indices, sparse_values, sparse_shape)

  • sparse_indices: 2-D. The indices of the minibatch SparseTensor.
  • sparse_values: 1-D. The values of the minibatch SparseTensor.
  • sparse_shape: 1-D. The shape of the minibatch SparseTensor.

tan

Arguments

:: OneOf `[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes tan of x element-wise.

tanh

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor Build t

y

Computes hyperbolic tangent of x element-wise.

tanh'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor Build t

y

tanhGrad

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Computes the gradient for the tanh of x wrt its input.

Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and dy + is the corresponding input gradient.

tanhGrad'

Arguments

:: OneOf `[Complex Double, Complex Float, Word16, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

temporaryVariable

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Shape

shape: The shape of the variable tensor.

-> m' (Tensor Ref dtype)

ref: A reference to the variable tensor.

Returns a tensor that may be mutated, but only persists within a single step.

This is an experimental op for internal use only and it is possible to use this op in unsafe ways. DO NOT USE unless you fully understand the risks.

It is the caller's responsibility to ensure that ref is eventually passed to a matching DestroyTemporaryVariable op after all other uses have completed.

Outputs a ref to the tensor state so it may be read or modified.

E.g.
var = state_ops._temporary_variable([1, 2], types.float_)
var_name = var.op.name
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = state_ops._destroy_temporary_variable(var, var_name=var_name)

temporaryVariable'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Shape

shape: The shape of the variable tensor.

-> m' (Tensor Ref dtype)

ref: A reference to the variable tensor.

tensorArray

Arguments

:: MonadBuild m' 
=> DataType

dtype

-> Tensor v'1 Int32

size

-> m' (Tensor Ref ByteString)

handle

tensorArray'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

dtype

-> Tensor v'1 Int32

size

-> m' (Tensor Ref ByteString)

handle

tensorArrayCloseV2

Arguments

:: MonadBuild m' 
=> Tensor v'1 ByteString

handle

-> m' ControlNode 

Deprecated. Use TensorArrayCloseV3

tensorArrayCloseV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> m' ControlNode 

tensorArrayCloseV3

Arguments

:: MonadBuild m' 
=> ResourceHandle

handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

-> m' ControlNode 

Delete the TensorArray from its resource container. This enables

the user to close and release the resource in the middle of a step/run.

tensorArrayCloseV3'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

-> m' ControlNode 

tensorArrayConcat

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value dtype, Tensor Value Int64)

(value, lengths)

  • value
  • lengths

tensorArrayConcat'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value dtype, Tensor Value Int64)

(value, lengths)

  • value
  • lengths

tensorArrayConcatV2

Arguments

:: TensorType dtype 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> (Tensor Build dtype, Tensor Build Int64)

(value, lengths)

  • value
  • lengths

Deprecated. Use TensorArrayConcatV3

tensorArrayConcatV2'

Arguments

:: TensorType dtype 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> (Tensor Build dtype, Tensor Build Int64)

(value, lengths)

  • value
  • lengths

tensorArrayConcatV3

Arguments

:: (MonadBuild m', TensorType dtype) 
=> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value dtype, Tensor Value Int64)

(value, lengths)

  • value: All of the elements in the TensorArray, concatenated along the first axis.
  • lengths: A vector of the row sizes of the original T elements in the value output. In the example above, this would be the values: `(n1, n2, ..., n(T-1))`.

Concat the elements from the TensorArray into value `value`.

Takes T elements of shapes

```
(n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
```

and concatenates them into a Tensor of shape:

```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```

All elements must have the same shape (excepting the first dimension).

tensorArrayConcatV3'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value dtype, Tensor Value Int64)

(value, lengths)

  • value: All of the elements in the TensorArray, concatenated along the first axis.
  • lengths: A vector of the row sizes of the original T elements in the value output. In the example above, this would be the values: `(n1, n2, ..., n(T-1))`.

tensorArrayGather

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 Float

flow_in

-> m' (Tensor Value dtype)

value

tensorArrayGather'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 Float

flow_in

-> m' (Tensor Value dtype)

value

tensorArrayGatherV2

Arguments

:: TensorType dtype 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 Float

flow_in

-> Tensor Build dtype

value

Deprecated. Use TensorArrayGatherV3

tensorArrayGatherV2'

Arguments

:: TensorType dtype 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 Float

flow_in

-> Tensor Build dtype

value

tensorArrayGatherV3

Arguments

:: (MonadBuild m', TensorType dtype) 
=> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

indices: The locations in the TensorArray from which to read tensor elements.

-> Tensor v'3 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value dtype)

value: All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0).

Gather specific elements from the TensorArray into output value.

All elements selected by indices must have the same shape.

tensorArrayGatherV3'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

indices: The locations in the TensorArray from which to read tensor elements.

-> Tensor v'3 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value dtype)

value: All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0).

tensorArrayGrad

Arguments

:: MonadBuild m' 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Ref ByteString)

grad_handle

tensorArrayGrad'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Ref ByteString)

grad_handle

tensorArrayGradV2

Arguments

:: MonadBuild m' 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value ByteString)

grad_handle

Deprecated. Use TensorArrayGradV3

tensorArrayGradV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value ByteString)

grad_handle

tensorArrayGradV3

Arguments

:: MonadBuild m' 
=> ResourceHandle

handle: The handle to the forward TensorArray.

-> Tensor v'2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (ResourceHandle, Tensor Value Float)

(grad_handle, flow_out)

  • grad_handle
  • flow_out

Creates a TensorArray for storing the gradients of values in the given handle.

If the given TensorArray gradient already exists, returns a reference to it.

Locks the size of the original TensorArray by disabling its dynamic size flag.

**A note about the input flow_in:**

The handle flow_in forces the execution of the gradient lookup to occur only after certain other operations have occurred. For example, when the forward TensorArray is dynamically sized, writes to this TensorArray may resize the object. The gradient TensorArray is statically sized based on the size of the forward TensorArray when this operation executes. Furthermore, the size of the forward TensorArray is frozen by this call. As a result, the flow is used to ensure that the call to generate the gradient TensorArray only happens after all writes are executed.

In the case of dynamically sized TensorArrays, gradient computation should only be performed on read operations that have themselves been chained via flow to occur only after all writes have executed. That way the final size of the forward TensorArray is known when this operation is called.

**A note about the source attribute:**

TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counts and generally breaks the TensorArray gradient flow.

The solution is to identify which gradient call this particular TensorArray gradient is being called in. This is performed by identifying a unique string (e.g. "gradients", "gradients_1", ...) from the input gradient Tensor's name. This string is used as a suffix when creating the TensorArray gradient object here (the attribute source).

The attribute source is added as a suffix to the forward TensorArray's name when performing the creation / lookup, so that each separate gradient calculation gets its own TensorArray accumulator.

tensorArrayGradV3'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

handle: The handle to the forward TensorArray.

-> Tensor v'2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (ResourceHandle, Tensor Value Float)

(grad_handle, flow_out)

  • grad_handle
  • flow_out

tensorArrayPack

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value dtype)

value

tensorArrayPack'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value dtype)

value

tensorArrayRead

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 Float

flow_in

-> m' (Tensor Value dtype)

value

tensorArrayRead'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 Float

flow_in

-> m' (Tensor Value dtype)

value

tensorArrayReadV2

Arguments

:: TensorType dtype 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 Float

flow_in

-> Tensor Build dtype

value

Deprecated. Use TensorArrayReadV3

tensorArrayReadV2'

Arguments

:: TensorType dtype 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 Float

flow_in

-> Tensor Build dtype

value

tensorArrayReadV3

Arguments

:: (MonadBuild m', TensorType dtype) 
=> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

index

-> Tensor v'3 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value dtype)

value: The tensor that is read from the TensorArray.

Read an element from the TensorArray into output value.

tensorArrayReadV3'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

index

-> Tensor v'3 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value dtype)

value: The tensor that is read from the TensorArray.

tensorArrayScatter

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArrayScatter'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArrayScatterV2

Arguments

:: TensorType t 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> Tensor Build Float

flow_out

Deprecated. Use TensorArrayScatterV3

tensorArrayScatterV2'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

indices

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> Tensor Build Float

flow_out

tensorArrayScatterV3

Arguments

:: (MonadBuild m', TensorType t) 
=> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

indices: The locations at which to write the tensor elements.

-> Tensor v'3 t

value: The concatenated tensor to write to the TensorArray.

-> Tensor v'4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Float)

flow_out: A float scalar that enforces proper chaining of operations.

Scatter the data from the input value into specific TensorArray elements.

indices must be a vector; its length must match the first dim of value.

tensorArrayScatterV3'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

indices: The locations at which to write the tensor elements.

-> Tensor v'3 t

value: The concatenated tensor to write to the TensorArray.

-> Tensor v'4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Float)

flow_out: A float scalar that enforces proper chaining of operations.

tensorArraySize

Arguments

:: MonadBuild m' 
=> Tensor Ref ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value Int32)

size

tensorArraySize'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 Float

flow_in

-> m' (Tensor Value Int32)

size

tensorArraySizeV2

Arguments

:: Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> Tensor Build Int32

size

Deprecated. Use TensorArraySizeV3

tensorArraySizeV2'

Arguments

:: OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Float

flow_in

-> Tensor Build Int32

size

tensorArraySizeV3

Arguments

:: MonadBuild m' 
=> ResourceHandle

handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

-> Tensor v'2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Int32)

size: The current size of the TensorArray.

Get the current size of the TensorArray.

tensorArraySizeV3'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).

-> Tensor v'2 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Int32)

size: The current size of the TensorArray.

tensorArraySplit

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 t

value

-> Tensor v'3 Int64

lengths

-> Tensor v'4 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArraySplit'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 t

value

-> Tensor v'3 Int64

lengths

-> Tensor v'4 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArraySplitV2

Arguments

:: TensorType t 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 t

value

-> Tensor v'3 Int64

lengths

-> Tensor v'4 Float

flow_in

-> Tensor Build Float

flow_out

Deprecated. Use TensorArraySplitV3

tensorArraySplitV2'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 t

value

-> Tensor v'3 Int64

lengths

-> Tensor v'4 Float

flow_in

-> Tensor Build Float

flow_out

tensorArraySplitV3

Arguments

:: (MonadBuild m', TensorType t) 
=> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 t

value: The concatenated tensor to write to the TensorArray.

-> Tensor v'3 Int64

lengths: The vector of lengths specifying how to split the rows of value into the TensorArray.

-> Tensor v'4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Float)

flow_out: A float scalar that enforces proper chaining of operations.

Split the data from the input value into TensorArray elements.

Assuming that lengths takes on values

```(n0, n1, ..., n(T-1))```

and that value has shape

```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,

this splits values into a TensorArray with T tensors.

TensorArray index t will be the subtensor of values with starting position

```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

and having size

```nt x d0 x d1 x ...```
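
A hedged sketch of the split semantics, reading one row back with tensorArrayReadV3 from this listing (the helper names and the DT_FLOAT import are assumptions based on the tensorflow-haskell API):

```haskell
-- Sketch only: helper names and the DT_FLOAT import are assumed from
-- the tensorflow-haskell API.
import Data.Int (Int32, Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps
import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))

main :: IO ()
main = do
    row <- TF.runSession $ do
        (h, flow0) <- CoreOps.tensorArrayV3 DT_FLOAT (TF.scalar (2 :: Int32))
        -- lengths = [1, 3]: element 0 gets one value, element 1 gets three.
        flow1 <- CoreOps.tensorArraySplitV3 h
                     (TF.vector [1, 2, 3, 4 :: Float])
                     (TF.vector [1, 3 :: Int64]) flow0
        elt <- CoreOps.tensorArrayReadV3 h (TF.scalar (1 :: Int32)) flow1
        TF.run elt
    print (row :: V.Vector Float)  -- [2.0,3.0,4.0]
```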

tensorArraySplitV3'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 t

value: The concatenated tensor to write to the TensorArray.

-> Tensor v'3 Int64

lengths: The vector of lengths specifying how to split the rows of value into the TensorArray.

-> Tensor v'4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Float)

flow_out: A float scalar that enforces proper chaining of operations.

tensorArrayUnpack

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 t

value

-> Tensor v'3 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArrayUnpack'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 t

value

-> Tensor v'3 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArrayV2

Arguments

:: MonadBuild m' 
=> DataType

dtype

-> Tensor v'1 Int32

size

-> m' (Tensor Value ByteString)

handle

Deprecated. Use TensorArrayV3

tensorArrayV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

dtype

-> Tensor v'1 Int32

size

-> m' (Tensor Value ByteString)

handle

tensorArrayV3

Arguments

:: MonadBuild m' 
=> DataType

dtype: The type of the elements in the tensor_array.

-> Tensor v'1 Int32

size: The size of the array.

-> m' (ResourceHandle, Tensor Value Float)

(handle, flow)

  • handle: The handle to the TensorArray.
  • flow: A scalar used to control gradient flow.

An array of Tensors of given size, with data written via Write and read via Read or Pack.

tensorArrayV3'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

dtype: The type of the elements in the tensor_array.

-> Tensor v'1 Int32

size: The size of the array.

-> m' (ResourceHandle, Tensor Value Float)

(handle, flow)

  • handle: The handle to the TensorArray.
  • flow: A scalar used to control gradient flow.

tensorArrayWrite

Arguments

:: (MonadBuild m', TensorType t) 
=> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArrayWrite'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Tensor Ref ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> m' (Tensor Value Float)

flow_out

tensorArrayWriteV2

Arguments

:: TensorType t 
=> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> Tensor Build Float

flow_out

Deprecated. Use TensorArrayWriteV3

tensorArrayWriteV2'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 ByteString

handle

-> Tensor v'2 Int32

index

-> Tensor v'3 t

value

-> Tensor v'4 Float

flow_in

-> Tensor Build Float

flow_out

tensorArrayWriteV3

Arguments

:: (MonadBuild m', TensorType t) 
=> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

index: The position to write to inside the TensorArray.

-> Tensor v'3 t

value: The tensor to write to the TensorArray.

-> Tensor v'4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Float)

flow_out: A float scalar that enforces proper chaining of operations.

Push an element onto the tensor_array.

tensorArrayWriteV3'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> ResourceHandle

handle: The handle to a TensorArray.

-> Tensor v'2 Int32

index: The position to write to inside the TensorArray.

-> Tensor v'3 t

value: The tensor to write to the TensorArray.

-> Tensor v'4 Float

flow_in: A float scalar that enforces proper chaining of operations.

-> m' (Tensor Value Float)

flow_out: A float scalar that enforces proper chaining of operations.

tensorSummary

Arguments

:: TensorType t 
=> Tensor v'1 t

tensor: A tensor to serialize.

-> Tensor Build ByteString

summary

Outputs a Summary protocol buffer with a tensor.

tensorSummary'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

tensor: A tensor to serialize.

-> Tensor Build ByteString

summary

textLineReader

Arguments

:: MonadBuild m' 
=> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the lines of a file delimited by '\n'.

textLineReader'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

textLineReaderV2

Arguments

:: MonadBuild m' 
=> m' ResourceHandle

reader_handle: The handle to reference the Reader.

A Reader that outputs the lines of a file delimited by '\n'.

textLineReaderV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' ResourceHandle

reader_handle: The handle to reference the Reader.

threadUnsafeUnigramCandidateSampler

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

Generates labels for candidate sampling with a learned unigram distribution.

See explanations of candidate sampling and the data formats at go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.

threadUnsafeUnigramCandidateSampler'

Arguments

:: OpParams 
-> Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

tile

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tmultiples) 
=> Tensor v'1 t

input: 1-D or higher.

-> Tensor v'2 tmultiples

multiples: 1-D. Length must be the same as the number of dimensions in input.

-> Tensor Build t

output

Constructs a tensor by tiling a given tensor.

This operation creates a new tensor by replicating input multiples times. The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, and the values of input are replicated `multiples[i]` times along the i'th dimension. For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
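
A minimal sketch reproducing the `[a b c d]` example above (the TF.vector, TF.runSession and TF.run helpers are assumptions from TensorFlow.Ops and TensorFlow.Core):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    -- Tiling [1,2,3,4] by [2] repeats the vector twice.
    out <- TF.runSession $
        TF.run (CoreOps.tile (TF.vector [1, 2, 3, 4 :: Float])
                             (TF.vector [2 :: Int32]))
    print (out :: V.Vector Float)  -- [1.0,2.0,3.0,4.0,1.0,2.0,3.0,4.0]
```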

tile'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tmultiples) 
=> OpParams 
-> Tensor v'1 t

input: 1-D or higher.

-> Tensor v'2 tmultiples

multiples: 1-D. Length must be the same as the number of dimensions in input.

-> Tensor Build t

output

tileGrad

Arguments

:: TensorType t 
=> Tensor v'1 t

input

-> Tensor v'2 Int32

multiples

-> Tensor Build t

output

Returns the gradient of Tile.

Since Tile takes an input and repeats the input multiples times along each dimension, TileGrad takes in multiples and aggregates each repeated tile of input into output.

tileGrad'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

input

-> Tensor v'2 Int32

multiples

-> Tensor Build t

output

topK

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Int64

k: Number of top elements to look for along the last dimension (along each row for matrices).

-> Tensor v'1 t

input: 1-D or higher with last dimension at least k.

-> (Tensor Build t, Tensor Build Int32)

(values, indices)

  • values: The k largest elements along each last dimensional slice.
  • indices: The indices of values within the last dimension of input.

Finds values and indices of the k largest elements for the last dimension.

If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

For matrices (resp. higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,

values.shape = indices.shape = input.shape[:-1] + [k]

If two elements are equal, the lower-index element appears first.

If k varies dynamically, use TopKV2 below.
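
For instance, a hedged sketch fetching both outputs at once (the helper names and tuple fetching are assumptions based on the tensorflow-haskell API):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    -- Top 2 of [1,4,2,5]: values [5,4] at indices [3,1].
    (vals, idxs) <- TF.runSession $
        TF.run (CoreOps.topK 2 (TF.vector [1, 4, 2, 5 :: Float]))
    print (vals :: V.Vector Float)  -- [5.0,4.0]
    print (idxs :: V.Vector Int32)  -- [3,1]
```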

topK'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Int64

k: Number of top elements to look for along the last dimension (along each row for matrices).

-> Tensor v'1 t

input: 1-D or higher with last dimension at least k.

-> (Tensor Build t, Tensor Build Int32)

(values, indices)

  • values: The k largest elements along each last dimensional slice.
  • indices: The indices of values within the last dimension of input.

topKV2

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

input: 1-D or higher with last dimension at least k.

-> Tensor v'2 Int32

k: 0-D. Number of top elements to look for along the last dimension (along each row for matrices).

-> (Tensor Build t, Tensor Build Int32)

(values, indices)

  • values: The k largest elements along each last dimensional slice.
  • indices: The indices of values within the last dimension of input.

Finds values and indices of the k largest elements for the last dimension.

If the input is a vector (rank-1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the j-th largest entry in input, and its index is `indices[j]`.

For matrices (resp. higher rank input), computes the top k entries in each row (resp. vector along the last dimension). Thus,

values.shape = indices.shape = input.shape[:-1] + [k]

If two elements are equal, the lower-index element appears first.

topKV2'

Arguments

:: OneOf `[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

input: 1-D or higher with last dimension at least k.

-> Tensor v'2 Int32

k: 0-D. Number of top elements to look for along the last dimension (along each row for matrices).

-> (Tensor Build t, Tensor Build Int32)

(values, indices)

  • values: The k largest elements along each last dimensional slice.
  • indices: The indices of values within the last dimension of input.

transpose

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tperm) 
=> Tensor v'1 t

x

-> Tensor v'2 tperm

perm

-> Tensor Build t

y

Shuffle dimensions of x according to a permutation.

The output y has the same rank as x. The shapes of x and y satisfy: `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
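
A minimal sketch transposing a 2x3 constant into a 3x2 one (TF.constant and TF.Shape are assumptions from TensorFlow.Ops and TensorFlow.Core):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    -- Transpose a 2x3 matrix with perm [1,0].
    out <- TF.runSession $
        TF.run (CoreOps.transpose
                    (TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float])
                    (TF.vector [1, 0 :: Int32]))
    -- Fetched in row-major order as the 3x2 result.
    print (out :: V.Vector Float)  -- [1.0,4.0,2.0,5.0,3.0,6.0]
```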

transpose'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` tperm) 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 tperm

perm

-> Tensor Build t

y

truncateDiv

Arguments

:: OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns x / y element-wise for integer types.

Truncation designates that negative numbers will round fractional quantities toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different from Python semantics. See FloorDiv for a division function that matches Python semantics.

*NOTE*: TruncateDiv supports broadcasting. More about broadcasting here.
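
A one-op sketch showing the rounding direction (helper names assumed as elsewhere in this listing):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    -- C-style truncation: -7 / 5 rounds toward zero, giving -1.
    out <- TF.runSession $
        TF.run (CoreOps.truncateDiv (TF.vector [-7 :: Int32])
                                    (TF.vector [5 :: Int32]))
    print (out :: V.Vector Int32)  -- [-1]
```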

truncateMod

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

Returns element-wise remainder of division. This emulates C semantics.

The result here is consistent with a truncating divide, i.e. `truncate(x / y) * y + mod(x, y) = x`.

*NOTE*: TruncateMod supports broadcasting. More about broadcasting here.

truncateMod'

Arguments

:: OneOf `[Int32, Int64, Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

y

-> Tensor Build t

z

truncatedNormal

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> Tensor v'1 t

shape: The shape of the output tensor.

-> m' (Tensor Value dtype)

output: A tensor of the specified shape filled with random truncated normal values.

Outputs random values from a truncated normal distribution.

The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
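
Since the op is monadic (MonadBuild), it is built inside the session; a hedged sketch (helper names assumed from the tensorflow-haskell API):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    samples <- TF.runSession $ do
        -- Six unit-normal draws, re-picked beyond 2 standard deviations,
        -- laid out as a 2x3 tensor.
        t <- CoreOps.truncatedNormal (TF.vector [2, 3 :: Int32])
        TF.run t
    print (samples :: V.Vector Float)  -- six values, each within [-2, 2]
```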

truncatedNormal'

Arguments

:: (MonadBuild m', OneOf `[Word16, Double, Float]` dtype, OneOf `[Int32, Int64]` t) 
=> OpParams 
-> Tensor v'1 t

shape: The shape of the output tensor.

-> m' (Tensor Value dtype)

output: A tensor of the specified shape filled with random truncated normal values.

uniformCandidateSampler

Arguments

:: Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

Generates labels for candidate sampling with a uniform distribution.

See explanations of candidate sampling and the data formats at go/candidate-sampling.

For each batch, this op picks a single set of sampled candidate labels.

The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
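
A hedged sketch drawing three unique candidates for a two-example batch (helper names and the three-way tuple fetch are assumptions based on the tensorflow-haskell API; the outputs are random, so only their shapes are predictable):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    -- Sample 3 unique candidates from [0, 10) for a batch of two
    -- single-label examples with true classes 4 and 7.
    (sampled, trueExp, sampledExp) <- TF.runSession $
        TF.run (CoreOps.uniformCandidateSampler 3 1 10 True
                    (TF.constant (TF.Shape [2, 1]) [4, 7 :: Int64]))
    print (sampled :: V.Vector Int64)     -- 3 candidate IDs
    print (trueExp :: V.Vector Float)     -- 2x1 expected counts, flattened
    print (sampledExp :: V.Vector Float)  -- 3 expected counts
```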

uniformCandidateSampler'

Arguments

:: OpParams 
-> Int64

num_sampled: Number of candidates to randomly sample per batch.

-> Int64

num_true: Number of true labels per context.

-> Int64

range_max: The sampler will sample integers from the interval [0, range_max).

-> Bool

unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.

-> Tensor v'1 Int64

true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.

-> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)

(sampled_candidates, true_expected_count, sampled_expected_count)

  • sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
  • true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
  • sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.

unique

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
=> Tensor v'1 t

x: 1-D.

-> (Tensor Build t, Tensor Build out_idx)

(y, idx)

  • y: 1-D.
  • idx: 1-D.

Finds unique elements in a 1-D tensor.

This operation returns a tensor y containing all of the unique elements of x sorted in the same order that they occur in x. This operation also returns a tensor idx the same size as x that contains the index of each value of x in the unique output y. In other words:

`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

For example:

```prettyprint
# tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx = unique(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```

unique'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
=> OpParams 
-> Tensor v'1 t

x: 1-D.

-> (Tensor Build t, Tensor Build out_idx)

(y, idx)

  • y: 1-D.
  • idx: 1-D.

uniqueWithCounts

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
=> Tensor v'1 t

x: 1-D.

-> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)

(y, idx, count)

  • y: 1-D.
  • idx: 1-D.
  • count: 1-D.

Finds unique elements in a 1-D tensor.

This operation returns a tensor y containing all of the unique elements of x sorted in the same order that they occur in x. This operation also returns a tensor idx the same size as x that contains the index of each value of x in the unique output y. Finally, it returns a third tensor count that contains the count of each element of y in x. In other words:

`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

For example:

```prettyprint
# tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx, count = unique_with_counts(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
```

uniqueWithCounts'

Arguments

:: (TensorType t, OneOf `[Int32, Int64]` out_idx) 
=> OpParams 
-> Tensor v'1 t

x: 1-D.

-> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)

(y, idx, count)

  • y: 1-D.
  • idx: 1-D.
  • count: 1-D.

unpack

Arguments

:: TensorType t 
=> Int64

num

-> Tensor v'1 t

value: 1-D or higher, with axis dimension size equal to num.

-> [Tensor Build t]

output: The list of tensors unpacked from value.

Unpacks a given dimension of a rank-R tensor into num rank-`(R-1)` tensors.

Unpacks num tensors from value by chipping it along the axis dimension. For example, given a tensor of shape `(A, B, C, D)`:

If `axis == 0` then the i'th tensor in output is the slice `value[i, :, :, :]` and each tensor in output will have shape `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike split).

If `axis == 1` then the i'th tensor in output is the slice `value[:, i, :, :]` and each tensor in output will have shape `(A, C, D)`. Etc.

This is the opposite of pack.
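
A minimal sketch chipping a 2x3 constant into its two rows (TF.constant, TF.Shape and pair fetching are assumptions based on the tensorflow-haskell API):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    -- Chip a 2x3 matrix along axis 0 into its two rows.
    let rows = CoreOps.unpack 2
                   (TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float])
    case rows of
        [r0, r1] -> do
            (v0, v1) <- TF.runSession $ TF.run (r0, r1)
            print (v0 :: V.Vector Float)  -- [1.0,2.0,3.0]
            print (v1 :: V.Vector Float)  -- [4.0,5.0,6.0]
        _ -> error "unpack 2 should yield exactly two tensors"
```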

unpack'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

num

-> Tensor v'1 t

value: 1-D or higher, with axis dimension size equal to num.

-> [Tensor Build t]

output: The list of tensors unpacked from value.

unsortedSegmentSum

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A tensor whose shape is a prefix of `data.shape`.

-> Tensor v'3 Int32

num_segments

-> Tensor Build t

output: Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size num_segments.

Computes the sum along segments of a tensor.

Read the section on Segmentation for an explanation of segments.

Computes a tensor such that `output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such that `segment_ids[j...] == i`. Unlike SegmentSum, segment_ids need not be sorted and need not cover all values in the full range of valid values.

If the sum is empty for a given segment ID i, `output[i] = 0`.

num_segments should equal the number of distinct segment IDs.

style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;" + style="width:100%" src="../../images/UnsortedSegmentSum.png" alt + /div

unsortedSegmentSum'

Arguments

:: (OneOf `[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float]` t, OneOf `[Int32, Int64]` tindices) 
=> OpParams 
-> Tensor v'1 t

data

-> Tensor v'2 tindices

segment_ids: A tensor whose shape is a prefix of `data.shape`.

-> Tensor v'3 Int32

num_segments

-> Tensor Build t

output: Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size num_segments.

unstage

Arguments

:: (MonadBuild m', TensorTypes dtypes) 
=> m' (TensorList Value dtypes)

values

Op is similar to a lightweight Dequeue.

The basic functionality is similar to dequeue with many fewer capabilities and options. This Op is optimized for performance.

unstage'

Arguments

:: (MonadBuild m', TensorTypes dtypes) 
=> OpParams 
-> m' (TensorList Value dtypes)

values

varHandleOp

Arguments

:: MonadBuild m' 
=> DataType

dtype: the type of this variable. Must agree with the dtypes of all ops using this variable.

-> Shape

shape: The (possibly partially specified) shape of this variable.

-> m' ResourceHandle

resource

Creates a handle to a Variable resource.

varHandleOp'

Arguments

:: MonadBuild m' 
=> OpParams 
-> DataType

dtype: the type of this variable. Must agree with the dtypes of all ops using this variable.

-> Shape

shape: The (possibly partially specified) shape of this variable.

-> m' ResourceHandle

resource

varIsInitializedOp

Arguments

:: MonadBuild m' 
=> ResourceHandle

resource: the input resource handle.

-> m' (Tensor Value Bool)

is_initialized: a scalar boolean which is true if the variable has been initialized.

Checks whether a resource handle-based variable has been initialized.

varIsInitializedOp'

Arguments

:: MonadBuild m' 
=> OpParams 
-> ResourceHandle

resource: the input resource handle.

-> m' (Tensor Value Bool)

is_initialized: a scalar boolean which is true if the variable has been initialized.

variable

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Shape

shape

-> m' (Tensor Ref dtype)

ref

Use VariableV2 instead.

variable'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Shape

shape

-> m' (Tensor Ref dtype)

ref

variableV2

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Shape

shape: The shape of the variable tensor.

-> m' (Tensor Ref dtype)

ref: A reference to the variable tensor.

Holds state in the form of a tensor that persists across steps.

Outputs a ref to the tensor state so it may be read or modified. TODO(zhifengc/mrry): Add a pointer to a more detailed document about sharing states in tensorflow.

variableV2'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Shape

shape: The shape of the variable tensor.

-> m' (Tensor Ref dtype)

ref: A reference to the variable tensor.

where'

Arguments

:: Tensor v'1 Bool

input

-> Tensor Build Int64

index

Returns locations of true values in a boolean tensor.

This operation returns the coordinates of true elements in input. The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of true elements, and the second dimension (columns) represents the coordinates of the true elements. Keep in mind, the shape of the output tensor can vary depending on how many true values there are in input. Indices are output in row-major order.

For example:

```prettyprint
# input tensor is [[True, False]
#                  [True, False]]
# input has two true values, so output has two coordinates.
# input has rank of 2, so coordinates have two indices.
where(input) ==> [[0, 0],
                  [1, 0]]

# input tensor is [[[True, False]
#                    [True, False]]
#                   [[False, True]
#                    [False, True]]
#                   [[False, False]
#                    [False, True]]]
# input has 5 true values, so output has 5 coordinates.
# input has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]
```

where''

Arguments

:: OpParams 
-> Tensor v'1 Bool

input

-> Tensor Build Int64

index

wholeFileReader

Arguments

:: MonadBuild m' 
=> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

A Reader that outputs the entire contents of a file as a value.

To use, enqueue filenames in a Queue. The output of ReaderRead will be a filename (key) and the contents of that file (value).

wholeFileReader'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' (Tensor Ref ByteString)

reader_handle: The handle to reference the Reader.

wholeFileReaderV2

Arguments

:: MonadBuild m' 
=> m' ResourceHandle

reader_handle: The handle to reference the Reader.

A Reader that outputs the entire contents of a file as a value.

To use, enqueue filenames in a Queue. The output of ReaderRead will be a filename (key) and the contents of that file (value).

wholeFileReaderV2'

Arguments

:: MonadBuild m' 
=> OpParams 
-> m' ResourceHandle

reader_handle: The handle to reference the Reader.

writeFile

Arguments

:: MonadBuild m' 
=> Tensor v'1 ByteString

filename: scalar. The name of the file to which we write the contents.

-> Tensor v'2 ByteString

contents: scalar. The content to be written to the output file.

-> m' ControlNode 

Writes contents to the file at input filename. Creates file if not existing.
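
A hedged sketch; the file path is illustrative, and TF.run_ for running control nodes is an assumption based on the tensorflow-haskell API:

```haskell
{-# LANGUAGE OverloadedStrings #-}
-- Sketch only: the path is illustrative; TF.run_ for control nodes is
-- assumed from the tensorflow-haskell API.
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = TF.runSession $ do
    -- Both inputs are scalar strings; the returned control node must be
    -- run for the write to take effect.
    node <- CoreOps.writeFile (TF.scalar "/tmp/hello.txt")
                              (TF.scalar "hello, tensorflow\n")
    TF.run_ node
```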

writeFile'

Arguments

:: MonadBuild m' 
=> OpParams 
-> Tensor v'1 ByteString

filename: scalar. The name of the file to which we write the contents.

-> Tensor v'2 ByteString

contents: scalar. The content to be written to the output file.

-> m' ControlNode 

zerosLike

Arguments

:: TensorType t 
=> Tensor v'1 t

x: a tensor of type T.

-> Tensor Build t

y: a tensor of the same shape and type as x but filled with zeros.

Returns a tensor of zeros with the same shape and type as x.

zerosLike'

Arguments

:: TensorType t 
=> OpParams 
-> Tensor v'1 t

x: a tensor of type T.

-> Tensor Build t

y: a tensor of the same shape and type as x but filled with zeros.

zeta

Arguments

:: OneOf `[Double, Float]` t 
=> Tensor v'1 t

x

-> Tensor v'2 t

q

-> Tensor Build t

z

Compute the Hurwitz zeta function \(\zeta(x, q)\).

The Hurwitz zeta function is defined as:

```
zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x}
```
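
For a quick sanity check, `zeta(2, 1)` reduces to the Riemann zeta function at 2, i.e. pi^2/6; a hedged sketch (helper names assumed from the tensorflow-haskell API):

```haskell
-- Sketch only: helper names are assumed from the tensorflow-haskell API.
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF
import qualified TensorFlow.GenOps.Core as CoreOps

main :: IO ()
main = do
    -- zeta(2, 1) = sum_{n>=1} 1/n^2 = pi^2/6 ~ 1.6449.
    out <- TF.runSession $
        TF.run (CoreOps.zeta (TF.vector [2 :: Float]) (TF.vector [1 :: Float]))
    print (out :: V.Vector Float)  -- approximately [1.6449]
```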

zeta'

Arguments

:: OneOf `[Double, Float]` t 
=> OpParams 
-> Tensor v'1 t

x

-> Tensor v'2 t

q

-> Tensor Build t

z

_Arg

Arguments

:: (MonadBuild m', TensorType t) 
=> Int64

index: This argument is the index-th argument of the function.

-> m' (Tensor Value t)

output: The argument.

A graph node which represents an argument to a function.

_Arg'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Int64

index: This argument is the index-th argument of the function.

-> m' (Tensor Value t)

output: The argument.

_ArrayToList

Arguments

:: (TensorType t, TensorTypes out_types) 
=> [Tensor v'1 t]

input

-> TensorList Build out_types

output

Converts an array of tensors to a list of tensors.

_ArrayToList'

Arguments

:: (TensorType t, TensorTypes out_types) 
=> OpParams 
-> [Tensor v'1 t]

input

-> TensorList Build out_types

output

_HostCast

Arguments

:: (TensorType srcT, TensorType dstT) 
=> Tensor v'1 srcT

x

-> Tensor Build dstT

y

Cast x of type SrcT to y of DstT.

_HostCast requires its input and produces its output in host memory.

_HostCast'

Arguments

:: (TensorType srcT, TensorType dstT) 
=> OpParams 
-> Tensor v'1 srcT

x

-> Tensor Build dstT

y

_HostRecv

Arguments

:: (MonadBuild m', TensorType tensor_type) 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> m' (Tensor Value tensor_type)

tensor: The tensor to receive.

Receives the named tensor from send_device on recv_device.

_HostRecv produces its output on host memory whereas _Recv produces its output on device memory.

_HostRecv'

Arguments

:: (MonadBuild m', TensorType tensor_type) 
=> OpParams 
-> Int64

send_device_incarnation: The current incarnation of send_device.

-> m' (Tensor Value tensor_type)

tensor: The tensor to receive.

_HostSend

Arguments

:: (MonadBuild m', TensorType t) 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> Tensor v'1 t

tensor: The tensor to send.

-> m' ControlNode 

Sends the named tensor from send_device to recv_device.

_HostSend requires its input on host memory whereas _Send requires its input on device memory.

_HostSend'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Int64

send_device_incarnation: The current incarnation of send_device.

-> Tensor v'1 t

tensor: The tensor to send.

-> m' ControlNode 

_ListToArray

Arguments

:: (TensorTypes tin, TensorType t) 
=> Int64

N

-> TensorList v'1 tin

input

-> [Tensor Build t]

output

Converts a list of tensors to an array of tensors.

_ListToArray'

Arguments

:: (TensorTypes tin, TensorType t) 
=> OpParams 
-> Int64

N

-> TensorList v'1 tin

input

-> [Tensor Build t]

output

_ParallelConcatStart

Arguments

:: (MonadBuild m', TensorType dtype) 
=> Shape

shape: 1-D Tensor indicating the shape of the output.

-> m' (Tensor Value dtype)

output: An empty Tensor of the specified type.

Creates an empty Tensor with shape shape and type dtype.

The memory can optionally be initialized. This is usually useful in conjunction with in-place operations.

_ParallelConcatStart'

Arguments

:: (MonadBuild m', TensorType dtype) 
=> OpParams 
-> Shape

shape: 1-D Tensor indicating the shape of the output.

-> m' (Tensor Value dtype)

output: An empty Tensor of the specified type.

_ParallelConcatUpdate

Arguments

:: TensorType t 
=> Int64

loc: A scalar indicating the index of the first dimension such that value[loc, :] is updated.

-> Tensor v'1 t

value: A Tensor object that will be updated in-place.

-> Tensor v'2 t

update: A Tensor of rank one less than value if loc is a scalar, otherwise of rank equal to value that contains the new values for value.

-> Tensor Build t

output: value that has been updated accordingly.

Updates input value at loc with update.

If you use this function you will almost certainly want to add a control dependency, as done in the implementation of parallel_stack, to avoid race conditions.

_ParallelConcatUpdate'

Arguments

:: TensorType t 
=> OpParams 
-> Int64

loc: A scalar indicating the index of the first dimension such that value[loc, :] is updated.

-> Tensor v'1 t

value: A Tensor object that will be updated in-place.

-> Tensor v'2 t

update: A Tensor of rank one less than value if loc is a scalar, otherwise of rank equal to value that contains the new values for value.

-> Tensor Build t

output: value that has been updated accordingly.

_Recv

Arguments

:: (MonadBuild m', TensorType tensor_type) 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> m' (Tensor Value tensor_type)

tensor: The tensor to receive.

Receives the named tensor from send_device on recv_device.

_Recv'

Arguments

:: (MonadBuild m', TensorType tensor_type) 
=> OpParams 
-> Int64

send_device_incarnation: The current incarnation of send_device.

-> m' (Tensor Value tensor_type)

tensor: The tensor to receive.

_Retval

Arguments

:: (MonadBuild m', TensorType t) 
=> Int64

index: This return value is the index-th return value of the function.

-> Tensor v'1 t

input: The return value.

-> m' ControlNode 

A graph node which represents a return value of a function.

_Retval'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Int64

index: This return value is the index-th return value of the function.

-> Tensor v'1 t

input: The return value.

-> m' ControlNode 

_Send

Arguments

:: (MonadBuild m', TensorType t) 
=> Int64

send_device_incarnation: The current incarnation of send_device.

-> Tensor v'1 t

tensor: The tensor to send.

-> m' ControlNode 

Sends the named tensor from send_device to recv_device.

_Send'

Arguments

:: (MonadBuild m', TensorType t) 
=> OpParams 
-> Int64

send_device_incarnation: The current incarnation of send_device.

-> Tensor v'1 t

tensor: The tensor to send.

-> m' ControlNode 
\ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html index 0e1566a..47418de 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-95.html @@ -1,4 +1,4 @@ tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - _)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html index 276e8fd..7a8b0a6 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-A.html @@ -1,4 +1,4 @@ tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - A)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - A

abortTensorFlow.GenOps.Core
absTensorFlow.GenOps.Core
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
addTensorFlow.GenOps.Core
addManySparseToTensorsMapTensorFlow.GenOps.Core
addNTensorFlow.GenOps.Core
addSparseToTensorsMapTensorFlow.GenOps.Core
adjustContrastTensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
anyTensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
argMaxTensorFlow.GenOps.Core
argMinTensorFlow.GenOps.Core
asinTensorFlow.GenOps.Core
assignTensorFlow.GenOps.Core
assignAddTensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
atanTensorFlow.GenOps.Core
audioSummaryTensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - A

abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
absTensorFlow.GenOps.Core
abs'TensorFlow.GenOps.Core
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
addTensorFlow.GenOps.Core
add'TensorFlow.GenOps.Core
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addNTensorFlow.GenOps.Core
addN'TensorFlow.GenOps.Core
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
argMaxTensorFlow.GenOps.Core
argMax'TensorFlow.GenOps.Core
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assignTensorFlow.GenOps.Core
assign'TensorFlow.GenOps.Core
assignAddTensorFlow.GenOps.Core
assignAdd'TensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
\ No newline at end of file diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html index 86713c1..ed2064d 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-All.html @@ -1,4 +1,4 @@ tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index

abortTensorFlow.GenOps.Core
absTensorFlow.GenOps.Core
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
addTensorFlow.GenOps.Core
addManySparseToTensorsMapTensorFlow.GenOps.Core
addNTensorFlow.GenOps.Core
addSparseToTensorsMapTensorFlow.GenOps.Core
adjustContrastTensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
anyTensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
argMaxTensorFlow.GenOps.Core
argMinTensorFlow.GenOps.Core
asinTensorFlow.GenOps.Core
assignTensorFlow.GenOps.Core
assignAddTensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
atanTensorFlow.GenOps.Core
audioSummaryTensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
barrierTensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
bitcastTensorFlow.GenOps.Core
broadcastGradientArgsTensorFlow.GenOps.Core
castTensorFlow.GenOps.Core
ceilTensorFlow.GenOps.Core
checkNumericsTensorFlow.GenOps.Core
choleskyTensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
complexTensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
concatTensorFlow.GenOps.Core
concatOffsetTensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
conjTensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
controlTriggerTensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
countUpToTensorFlow.GenOps.Core
createVariableOpTensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
debugIdentityTensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
decodeBase64TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
deleteSessionTensorTensorFlow.GenOps.Core
depthToSpaceTensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
deserializeManySparseTensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
diagTensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
divTensorFlow.GenOps.Core
drawBoundingBoxesTensorFlow.GenOps.Core
dynamicPartitionTensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
editDistanceTensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
encodeBase64TensorFlow.GenOps.Core
encodeJpegTensorFlow.GenOps.Core
encodePngTensorFlow.GenOps.Core
enterTensorFlow.GenOps.Core
equalTensorFlow.GenOps.Core
erfTensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
exitTensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
expandDimsTensorFlow.GenOps.Core
extractGlimpseTensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
factTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fFTTensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fillTensorFlow.GenOps.Core
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
floorTensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
gatherTensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
getSessionHandleTensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
greaterTensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
histogramSummaryTensorFlow.GenOps.Core
hSVToRGBTensorFlow.GenOps.Core
identityTensorFlow.GenOps.Core
identityReaderTensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imageSummaryTensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
initializeTableTensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
inTopKTensorFlow.GenOps.Core
invTensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
isFiniteTensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isVariableInitializedTensorFlow.GenOps.Core
l2LossTensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
lessTensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lgammaTensorFlow.GenOps.Core
linSpaceTensorFlow.GenOps.Core
listDiffTensorFlow.GenOps.Core
logTensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
logicalAndTensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logSoftmaxTensorFlow.GenOps.Core
logUniformCandidateSamplerTensorFlow.GenOps.Core
lookupTableExportTensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
matchingFilesTensorFlow.GenOps.Core
matMulTensorFlow.GenOps.Core
matrixBandPartTensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
maxTensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
meanTensorFlow.GenOps.Core
mergeTensorFlow.GenOps.Core
mergeSummaryTensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
minTensorFlow.GenOps.Core
minimumTensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
modTensorFlow.GenOps.Core
mulTensorFlow.GenOps.Core
multinomialTensorFlow.GenOps.Core
negTensorFlow.GenOps.Core
negTrainTensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nonMaxSuppressionTensorFlow.GenOps.Core
noOpTensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
oneHotTensorFlow.GenOps.Core
packTensorFlow.GenOps.Core
padTensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
placeholderTensorFlow.GenOps.Core
placeholderV2TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
polygammaTensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
queueCloseTensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
randomCropTensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
rangeTensorFlow.GenOps.Core
rankTensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readVariableOpTensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reduceJoinTensorFlow.GenOps.Core
refEnterTensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
reluTensorFlow.GenOps.Core
relu6TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
reluGradTensorFlow.GenOps.Core
requantizationRangeTensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
reshapeTensorFlow.GenOps.Core
resizeAreaTensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
resourceScatterAddTensorFlow.GenOps.Core
restoreTensorFlow.GenOps.Core
restoreSliceTensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
rsqrtTensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
scalarSummaryTensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
sdcaFprintTensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
shapeTensorFlow.GenOps.Core
shapeNTensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoidGradTensorFlow.GenOps.Core
signTensorFlow.GenOps.Core
sinTensorFlow.GenOps.Core
sizeTensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
softmaxTensorFlow.GenOps.Core
softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
softplusTensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
spaceToBatchTensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseToDenseTensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stopGradientTensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
subTensorFlow.GenOps.Core
substrTensorFlow.GenOps.Core
sumTensorFlow.GenOps.Core
svdTensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorSummaryTensorFlow.GenOps.Core
textLineReaderTensorFlow.GenOps.Core
tFRecordReaderTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
topKTensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
transposeTensorFlow.GenOps.Core
truncateDivTensorFlow.GenOps.Core
truncatedNormalTensorFlow.GenOps.Core
truncateModTensorFlow.GenOps.Core
uniformCandidateSamplerTensorFlow.GenOps.Core
uniqueTensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
unpackTensorFlow.GenOps.Core
unsortedSegmentSumTensorFlow.GenOps.Core
varHandleOpTensorFlow.GenOps.Core
variableTensorFlow.GenOps.Core
varIsInitializedOpTensorFlow.GenOps.Core
where'TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
writeFileTensorFlow.GenOps.Core
zerosLikeTensorFlow.GenOps.Core
zetaTensorFlow.GenOps.Core
_ArgTensorFlow.GenOps.Core
_HostCastTensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_RecvTensorFlow.GenOps.Core
_RetvalTensorFlow.GenOps.Core
_SendTensorFlow.GenOps.Core
\ No newline at end of file
+

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index
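
The systematic change recorded in this index is that every generated op foo has gained a primed companion foo'. In the tensorflow-haskell API of this era, foo' takes one extra leading argument of type OpParams (a function OpDef -> OpDef) that is applied to the op's OpDef before the node is added to the graph, letting callers override attributes such as the node's name, while the unprimed foo uses the defaults (effectively foo = foo' id). Below is a minimal sketch of the pair in use; it assumes the 0.1.0.0-era exports (constant and Shape via TensorFlow.Ops / TensorFlow.Core, the opName lens, run / runSession), and the node name "MySum" is purely illustrative.

{-# LANGUAGE OverloadedStrings #-}

import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import Lens.Family2 ((.~))
import TensorFlow.Core (Shape (..), opName, run, runSession)
import qualified TensorFlow.GenOps.Core as Ops
import qualified TensorFlow.Ops as TF

main :: IO ()
main = runSession $ do
    let x = TF.constant (Shape [2]) [3, 4 :: Int32]
        y = TF.constant (Shape [2]) [5, 6 :: Int32]
    -- Unprimed form: builds the Add node with default attributes.
    plain <- run (Ops.add x y)
    -- Primed form: same op, but the OpParams function (here an opName
    -- override) rewrites the OpDef before the node joins the graph.
    named <- run (Ops.add' (opName .~ "MySum") x y)
    liftIO $ print (plain :: V.Vector Int32, named :: V.Vector Int32)

The split keeps the common case ergonomic (no extra argument for callers who want defaults) while still exposing the full OpDef for attribute overrides, without hand-writing a second wrapper per op.
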

abortTensorFlow.GenOps.Core
abort'TensorFlow.GenOps.Core
absTensorFlow.GenOps.Core
abs'TensorFlow.GenOps.Core
accumulatorApplyGradientTensorFlow.GenOps.Core
accumulatorApplyGradient'TensorFlow.GenOps.Core
accumulatorNumAccumulatedTensorFlow.GenOps.Core
accumulatorNumAccumulated'TensorFlow.GenOps.Core
accumulatorSetGlobalStepTensorFlow.GenOps.Core
accumulatorSetGlobalStep'TensorFlow.GenOps.Core
accumulatorTakeGradientTensorFlow.GenOps.Core
accumulatorTakeGradient'TensorFlow.GenOps.Core
acosTensorFlow.GenOps.Core
acos'TensorFlow.GenOps.Core
addTensorFlow.GenOps.Core
add'TensorFlow.GenOps.Core
addManySparseToTensorsMapTensorFlow.GenOps.Core
addManySparseToTensorsMap'TensorFlow.GenOps.Core
addNTensorFlow.GenOps.Core
addN'TensorFlow.GenOps.Core
addSparseToTensorsMapTensorFlow.GenOps.Core
addSparseToTensorsMap'TensorFlow.GenOps.Core
adjustContrastTensorFlow.GenOps.Core
adjustContrast'TensorFlow.GenOps.Core
adjustContrastv2TensorFlow.GenOps.Core
adjustContrastv2'TensorFlow.GenOps.Core
adjustHueTensorFlow.GenOps.Core
adjustHue'TensorFlow.GenOps.Core
adjustSaturationTensorFlow.GenOps.Core
adjustSaturation'TensorFlow.GenOps.Core
allTensorFlow.GenOps.Core
all'TensorFlow.GenOps.Core
allCandidateSamplerTensorFlow.GenOps.Core
allCandidateSampler'TensorFlow.GenOps.Core
anyTensorFlow.GenOps.Core
any'TensorFlow.GenOps.Core
applyAdadeltaTensorFlow.GenOps.Core
applyAdadelta'TensorFlow.GenOps.Core
applyAdagradTensorFlow.GenOps.Core
applyAdagrad'TensorFlow.GenOps.Core
applyAdagradDATensorFlow.GenOps.Core
applyAdagradDA'TensorFlow.GenOps.Core
applyAdamTensorFlow.GenOps.Core
applyAdam'TensorFlow.GenOps.Core
applyCenteredRMSPropTensorFlow.GenOps.Core
applyCenteredRMSProp'TensorFlow.GenOps.Core
applyFtrlTensorFlow.GenOps.Core
applyFtrl'TensorFlow.GenOps.Core
applyGradientDescentTensorFlow.GenOps.Core
applyGradientDescent'TensorFlow.GenOps.Core
applyMomentumTensorFlow.GenOps.Core
applyMomentum'TensorFlow.GenOps.Core
applyProximalAdagradTensorFlow.GenOps.Core
applyProximalAdagrad'TensorFlow.GenOps.Core
applyProximalGradientDescentTensorFlow.GenOps.Core
applyProximalGradientDescent'TensorFlow.GenOps.Core
applyRMSPropTensorFlow.GenOps.Core
applyRMSProp'TensorFlow.GenOps.Core
argMaxTensorFlow.GenOps.Core
argMax'TensorFlow.GenOps.Core
argMinTensorFlow.GenOps.Core
argMin'TensorFlow.GenOps.Core
asinTensorFlow.GenOps.Core
asin'TensorFlow.GenOps.Core
assertTensorFlow.GenOps.Core
assert'TensorFlow.GenOps.Core
assignTensorFlow.GenOps.Core
assign'TensorFlow.GenOps.Core
assignAddTensorFlow.GenOps.Core
assignAdd'TensorFlow.GenOps.Core
assignAddVariableOpTensorFlow.GenOps.Core
assignAddVariableOp'TensorFlow.GenOps.Core
assignSubTensorFlow.GenOps.Core
assignSub'TensorFlow.GenOps.Core
assignVariableOpTensorFlow.GenOps.Core
assignVariableOp'TensorFlow.GenOps.Core
asStringTensorFlow.GenOps.Core
asString'TensorFlow.GenOps.Core
atanTensorFlow.GenOps.Core
atan'TensorFlow.GenOps.Core
audioSummaryTensorFlow.GenOps.Core
audioSummary'TensorFlow.GenOps.Core
audioSummaryV2TensorFlow.GenOps.Core
audioSummaryV2'TensorFlow.GenOps.Core
avgPoolTensorFlow.GenOps.Core
avgPool'TensorFlow.GenOps.Core
avgPool3DTensorFlow.GenOps.Core
avgPool3D'TensorFlow.GenOps.Core
avgPool3DGradTensorFlow.GenOps.Core
avgPool3DGrad'TensorFlow.GenOps.Core
avgPoolGradTensorFlow.GenOps.Core
avgPoolGrad'TensorFlow.GenOps.Core
barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgsTensorFlow.GenOps.Core
broadcastGradientArgs'TensorFlow.GenOps.Core
castTensorFlow.GenOps.Core
cast'TensorFlow.GenOps.Core
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
concatTensorFlow.GenOps.Core
concat'TensorFlow.GenOps.Core
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copy'TensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
copyHost'TensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
debugIdentityTensorFlow.GenOps.Core
debugIdentity'TensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugNanCount'TensorFlow.GenOps.Core
debugNumericSummaryTensorFlow.GenOps.Core
debugNumericSummary'TensorFlow.GenOps.Core
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
editDistanceTensorFlow.GenOps.Core
editDistance'TensorFlow.GenOps.Core
eluTensorFlow.GenOps.Core
elu'TensorFlow.GenOps.Core
eluGradTensorFlow.GenOps.Core
eluGrad'TensorFlow.GenOps.Core
encodeBase64TensorFlow.GenOps.Core
encodeBase64'TensorFlow.GenOps.Core
encodeJpegTensorFlow.GenOps.Core
encodeJpeg'TensorFlow.GenOps.Core
encodePngTensorFlow.GenOps.Core
encodePng'TensorFlow.GenOps.Core
enterTensorFlow.GenOps.Core
enter'TensorFlow.GenOps.Core
equalTensorFlow.GenOps.Core
equal'TensorFlow.GenOps.Core
erfTensorFlow.GenOps.Core
erf'TensorFlow.GenOps.Core
erfcTensorFlow.GenOps.Core
erfc'TensorFlow.GenOps.Core
exitTensorFlow.GenOps.Core
exit'TensorFlow.GenOps.Core
expTensorFlow.GenOps.Core
exp'TensorFlow.GenOps.Core
expandDimsTensorFlow.GenOps.Core
expandDims'TensorFlow.GenOps.Core
expm1TensorFlow.GenOps.Core
expm1'TensorFlow.GenOps.Core
extractGlimpseTensorFlow.GenOps.Core
extractGlimpse'TensorFlow.GenOps.Core
extractImagePatchesTensorFlow.GenOps.Core
extractImagePatches'TensorFlow.GenOps.Core
factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
fillTensorFlow.GenOps.Core
fill'TensorFlow.GenOps.Core
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
gatherTensorFlow.GenOps.Core
gather'TensorFlow.GenOps.Core
gatherNdTensorFlow.GenOps.Core
gatherNd'TensorFlow.GenOps.Core
getSessionHandleTensorFlow.GenOps.Core
getSessionHandle'TensorFlow.GenOps.Core
getSessionTensorTensorFlow.GenOps.Core
getSessionTensor'TensorFlow.GenOps.Core
greaterTensorFlow.GenOps.Core
greater'TensorFlow.GenOps.Core
greaterEqualTensorFlow.GenOps.Core
greaterEqual'TensorFlow.GenOps.Core
hashTableTensorFlow.GenOps.Core
hashTable'TensorFlow.GenOps.Core
histogramSummaryTensorFlow.GenOps.Core
histogramSummary'TensorFlow.GenOps.Core
hSVToRGBTensorFlow.GenOps.Core
hSVToRGB'TensorFlow.GenOps.Core
identityTensorFlow.GenOps.Core
identity'TensorFlow.GenOps.Core
identityReaderTensorFlow.GenOps.Core
identityReader'TensorFlow.GenOps.Core
identityReaderV2TensorFlow.GenOps.Core
identityReaderV2'TensorFlow.GenOps.Core
iFFTTensorFlow.GenOps.Core
iFFT'TensorFlow.GenOps.Core
iFFT2DTensorFlow.GenOps.Core
iFFT2D'TensorFlow.GenOps.Core
iFFT3DTensorFlow.GenOps.Core
iFFT3D'TensorFlow.GenOps.Core
igammaTensorFlow.GenOps.Core
igamma'TensorFlow.GenOps.Core
igammacTensorFlow.GenOps.Core
igammac'TensorFlow.GenOps.Core
imagTensorFlow.GenOps.Core
imag'TensorFlow.GenOps.Core
imageSummaryTensorFlow.GenOps.Core
imageSummary'TensorFlow.GenOps.Core
immutableConstTensorFlow.GenOps.Core
immutableConst'TensorFlow.GenOps.Core
initializeTableTensorFlow.GenOps.Core
initializeTable'TensorFlow.GenOps.Core
initializeTableFromTextFileTensorFlow.GenOps.Core
initializeTableFromTextFile'TensorFlow.GenOps.Core
inTopKTensorFlow.GenOps.Core
inTopK'TensorFlow.GenOps.Core
invTensorFlow.GenOps.Core
inv'TensorFlow.GenOps.Core
invertPermutationTensorFlow.GenOps.Core
invertPermutation'TensorFlow.GenOps.Core
invGradTensorFlow.GenOps.Core
invGrad'TensorFlow.GenOps.Core
isFiniteTensorFlow.GenOps.Core
isFinite'TensorFlow.GenOps.Core
isInfTensorFlow.GenOps.Core
isInf'TensorFlow.GenOps.Core
isNanTensorFlow.GenOps.Core
isNan'TensorFlow.GenOps.Core
isVariableInitializedTensorFlow.GenOps.Core
isVariableInitialized'TensorFlow.GenOps.Core
l2LossTensorFlow.GenOps.Core
l2Loss'TensorFlow.GenOps.Core
learnedUnigramCandidateSamplerTensorFlow.GenOps.Core
learnedUnigramCandidateSampler'TensorFlow.GenOps.Core
lessTensorFlow.GenOps.Core
less'TensorFlow.GenOps.Core
lessEqualTensorFlow.GenOps.Core
lessEqual'TensorFlow.GenOps.Core
lgammaTensorFlow.GenOps.Core
lgamma'TensorFlow.GenOps.Core
linSpaceTensorFlow.GenOps.Core
linSpace'TensorFlow.GenOps.Core
listDiffTensorFlow.GenOps.Core
listDiff'TensorFlow.GenOps.Core
logTensorFlow.GenOps.Core
log'TensorFlow.GenOps.Core
log1pTensorFlow.GenOps.Core
log1p'TensorFlow.GenOps.Core
logicalAndTensorFlow.GenOps.Core
logicalAnd'TensorFlow.GenOps.Core
logicalNotTensorFlow.GenOps.Core
logicalNot'TensorFlow.GenOps.Core
logicalOrTensorFlow.GenOps.Core
logicalOr'TensorFlow.GenOps.Core
logSoftmaxTensorFlow.GenOps.Core
logSoftmax'TensorFlow.GenOps.Core
logUniformCandidateSamplerTensorFlow.GenOps.Core
logUniformCandidateSampler'TensorFlow.GenOps.Core
lookupTableExportTensorFlow.GenOps.Core
lookupTableExport'TensorFlow.GenOps.Core
lookupTableFindTensorFlow.GenOps.Core
lookupTableFind'TensorFlow.GenOps.Core
lookupTableImportTensorFlow.GenOps.Core
lookupTableImport'TensorFlow.GenOps.Core
lookupTableInsertTensorFlow.GenOps.Core
lookupTableInsert'TensorFlow.GenOps.Core
lookupTableSizeTensorFlow.GenOps.Core
lookupTableSize'TensorFlow.GenOps.Core
loopCondTensorFlow.GenOps.Core
loopCond'TensorFlow.GenOps.Core
lRNTensorFlow.GenOps.Core
lRN'TensorFlow.GenOps.Core
lRNGradTensorFlow.GenOps.Core
lRNGrad'TensorFlow.GenOps.Core
matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMulTensorFlow.GenOps.Core
matMul'TensorFlow.GenOps.Core
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
maxTensorFlow.GenOps.Core
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
meanTensorFlow.GenOps.Core
mean'TensorFlow.GenOps.Core
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
minTensorFlow.GenOps.Core
min'TensorFlow.GenOps.Core
minimumTensorFlow.GenOps.Core
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
mulTensorFlow.GenOps.Core
mul'TensorFlow.GenOps.Core
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
negTensorFlow.GenOps.Core
neg'TensorFlow.GenOps.Core
negTrainTensorFlow.GenOps.Core
negTrain'TensorFlow.GenOps.Core
nextIterationTensorFlow.GenOps.Core
nextIteration'TensorFlow.GenOps.Core
nonMaxSuppressionTensorFlow.GenOps.Core
nonMaxSuppression'TensorFlow.GenOps.Core
noOpTensorFlow.GenOps.Core
noOp'TensorFlow.GenOps.Core
notEqualTensorFlow.GenOps.Core
notEqual'TensorFlow.GenOps.Core
oneHotTensorFlow.GenOps.Core
oneHot'TensorFlow.GenOps.Core
packTensorFlow.GenOps.Core
pack'TensorFlow.GenOps.Core
padTensorFlow.GenOps.Core
pad'TensorFlow.GenOps.Core
paddingFIFOQueueTensorFlow.GenOps.Core
paddingFIFOQueue'TensorFlow.GenOps.Core
paddingFIFOQueueV2TensorFlow.GenOps.Core
paddingFIFOQueueV2'TensorFlow.GenOps.Core
parallelConcatTensorFlow.GenOps.Core
parallelConcat'TensorFlow.GenOps.Core
parameterizedTruncatedNormalTensorFlow.GenOps.Core
parameterizedTruncatedNormal'TensorFlow.GenOps.Core
parseExampleTensorFlow.GenOps.Core
parseExample'TensorFlow.GenOps.Core
parseSingleSequenceExampleTensorFlow.GenOps.Core
parseSingleSequenceExample'TensorFlow.GenOps.Core
parseTensorTensorFlow.GenOps.Core
parseTensor'TensorFlow.GenOps.Core
placeholderTensorFlow.GenOps.Core
placeholder'TensorFlow.GenOps.Core
placeholderV2TensorFlow.GenOps.Core
placeholderV2'TensorFlow.GenOps.Core
placeholderWithDefaultTensorFlow.GenOps.Core
placeholderWithDefault'TensorFlow.GenOps.Core
polygammaTensorFlow.GenOps.Core
polygamma'TensorFlow.GenOps.Core
powTensorFlow.GenOps.Core
pow'TensorFlow.GenOps.Core
preventGradientTensorFlow.GenOps.Core
preventGradient'TensorFlow.GenOps.Core
printTensorFlow.GenOps.Core
print'TensorFlow.GenOps.Core
priorityQueueTensorFlow.GenOps.Core
priorityQueue'TensorFlow.GenOps.Core
priorityQueueV2TensorFlow.GenOps.Core
priorityQueueV2'TensorFlow.GenOps.Core
prodTensorFlow.GenOps.Core
prod'TensorFlow.GenOps.Core
qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
rangeTensorFlow.GenOps.Core
range'TensorFlow.GenOps.Core
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
reluTensorFlow.GenOps.Core
relu'TensorFlow.GenOps.Core
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGradTensorFlow.GenOps.Core
reluGrad'TensorFlow.GenOps.Core
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
reshapeTensorFlow.GenOps.Core
reshape'TensorFlow.GenOps.Core
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdamTensorFlow.GenOps.Core
resourceApplyAdam'TensorFlow.GenOps.Core
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
restoreTensorFlow.GenOps.Core
restore'TensorFlow.GenOps.Core
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
saveTensorFlow.GenOps.Core
save'TensorFlow.GenOps.Core
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
scalarSummaryTensorFlow.GenOps.Core
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
shapeTensorFlow.GenOps.Core
shape'TensorFlow.GenOps.Core
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
signTensorFlow.GenOps.Core
sign'TensorFlow.GenOps.Core
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sizeTensorFlow.GenOps.Core
size'TensorFlow.GenOps.Core
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
slice'TensorFlow.GenOps.Core
softmaxTensorFlow.GenOps.Core
softmax'TensorFlow.GenOps.Core
softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
softmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseToDenseTensorFlow.GenOps.Core
sparseToDense'TensorFlow.GenOps.Core
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
subTensorFlow.GenOps.Core
sub'TensorFlow.GenOps.Core
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sumTensorFlow.GenOps.Core
sum'TensorFlow.GenOps.Core
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
transposeTensorFlow.GenOps.Core
transpose'TensorFlow.GenOps.Core
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormalTensorFlow.GenOps.Core
truncatedNormal'TensorFlow.GenOps.Core
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
uniformCandidateSamplerTensorFlow.GenOps.Core
uniformCandidateSampler'TensorFlow.GenOps.Core
uniqueTensorFlow.GenOps.Core
unique'TensorFlow.GenOps.Core
uniqueWithCountsTensorFlow.GenOps.Core
uniqueWithCounts'TensorFlow.GenOps.Core
unpackTensorFlow.GenOps.Core
unpack'TensorFlow.GenOps.Core
unsortedSegmentSumTensorFlow.GenOps.Core
unsortedSegmentSum'TensorFlow.GenOps.Core
unstageTensorFlow.GenOps.Core
unstage'TensorFlow.GenOps.Core
varHandleOpTensorFlow.GenOps.Core
varHandleOp'TensorFlow.GenOps.Core
variableTensorFlow.GenOps.Core
variable'TensorFlow.GenOps.Core
variableV2TensorFlow.GenOps.Core
variableV2'TensorFlow.GenOps.Core
varIsInitializedOpTensorFlow.GenOps.Core
varIsInitializedOp'TensorFlow.GenOps.Core
where'TensorFlow.GenOps.Core
where''TensorFlow.GenOps.Core
wholeFileReaderTensorFlow.GenOps.Core
wholeFileReader'TensorFlow.GenOps.Core
wholeFileReaderV2TensorFlow.GenOps.Core
wholeFileReaderV2'TensorFlow.GenOps.Core
writeFileTensorFlow.GenOps.Core
writeFile'TensorFlow.GenOps.Core
zerosLikeTensorFlow.GenOps.Core
zerosLike'TensorFlow.GenOps.Core
zetaTensorFlow.GenOps.Core
zeta'TensorFlow.GenOps.Core
_ArgTensorFlow.GenOps.Core
_Arg'TensorFlow.GenOps.Core
_ArrayToListTensorFlow.GenOps.Core
_ArrayToList'TensorFlow.GenOps.Core
_HostCastTensorFlow.GenOps.Core
_HostCast'TensorFlow.GenOps.Core
_HostRecvTensorFlow.GenOps.Core
_HostRecv'TensorFlow.GenOps.Core
_HostSendTensorFlow.GenOps.Core
_HostSend'TensorFlow.GenOps.Core
_ListToArrayTensorFlow.GenOps.Core
_ListToArray'TensorFlow.GenOps.Core
_ParallelConcatStartTensorFlow.GenOps.Core
_ParallelConcatStart'TensorFlow.GenOps.Core
_ParallelConcatUpdateTensorFlow.GenOps.Core
_ParallelConcatUpdate'TensorFlow.GenOps.Core
_RecvTensorFlow.GenOps.Core
_Recv'TensorFlow.GenOps.Core
_RetvalTensorFlow.GenOps.Core
_Retval'TensorFlow.GenOps.Core
_SendTensorFlow.GenOps.Core
_Send'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html
index b7ff19b..d6a5e1b 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-B.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - B)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - B

barrierTensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
bitcastTensorFlow.GenOps.Core
broadcastGradientArgsTensorFlow.GenOps.Core
\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - B

barrierTensorFlow.GenOps.Core
barrier'TensorFlow.GenOps.Core
barrierCloseTensorFlow.GenOps.Core
barrierClose'TensorFlow.GenOps.Core
barrierIncompleteSizeTensorFlow.GenOps.Core
barrierIncompleteSize'TensorFlow.GenOps.Core
barrierInsertManyTensorFlow.GenOps.Core
barrierInsertMany'TensorFlow.GenOps.Core
barrierReadySizeTensorFlow.GenOps.Core
barrierReadySize'TensorFlow.GenOps.Core
barrierTakeManyTensorFlow.GenOps.Core
barrierTakeMany'TensorFlow.GenOps.Core
batchCholeskyTensorFlow.GenOps.Core
batchCholesky'TensorFlow.GenOps.Core
batchCholeskyGradTensorFlow.GenOps.Core
batchCholeskyGrad'TensorFlow.GenOps.Core
batchFFTTensorFlow.GenOps.Core
batchFFT'TensorFlow.GenOps.Core
batchFFT2DTensorFlow.GenOps.Core
batchFFT2D'TensorFlow.GenOps.Core
batchFFT3DTensorFlow.GenOps.Core
batchFFT3D'TensorFlow.GenOps.Core
batchIFFTTensorFlow.GenOps.Core
batchIFFT'TensorFlow.GenOps.Core
batchIFFT2DTensorFlow.GenOps.Core
batchIFFT2D'TensorFlow.GenOps.Core
batchIFFT3DTensorFlow.GenOps.Core
batchIFFT3D'TensorFlow.GenOps.Core
batchMatMulTensorFlow.GenOps.Core
batchMatMul'TensorFlow.GenOps.Core
batchMatrixBandPartTensorFlow.GenOps.Core
batchMatrixBandPart'TensorFlow.GenOps.Core
batchMatrixDeterminantTensorFlow.GenOps.Core
batchMatrixDeterminant'TensorFlow.GenOps.Core
batchMatrixDiagTensorFlow.GenOps.Core
batchMatrixDiag'TensorFlow.GenOps.Core
batchMatrixDiagPartTensorFlow.GenOps.Core
batchMatrixDiagPart'TensorFlow.GenOps.Core
batchMatrixInverseTensorFlow.GenOps.Core
batchMatrixInverse'TensorFlow.GenOps.Core
batchMatrixSetDiagTensorFlow.GenOps.Core
batchMatrixSetDiag'TensorFlow.GenOps.Core
batchMatrixSolveTensorFlow.GenOps.Core
batchMatrixSolve'TensorFlow.GenOps.Core
batchMatrixSolveLsTensorFlow.GenOps.Core
batchMatrixSolveLs'TensorFlow.GenOps.Core
batchMatrixTriangularSolveTensorFlow.GenOps.Core
batchMatrixTriangularSolve'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationTensorFlow.GenOps.Core
batchNormWithGlobalNormalization'TensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGradTensorFlow.GenOps.Core
batchNormWithGlobalNormalizationGrad'TensorFlow.GenOps.Core
batchSelfAdjointEigTensorFlow.GenOps.Core
batchSelfAdjointEig'TensorFlow.GenOps.Core
batchSelfAdjointEigV2TensorFlow.GenOps.Core
batchSelfAdjointEigV2'TensorFlow.GenOps.Core
batchSvdTensorFlow.GenOps.Core
batchSvd'TensorFlow.GenOps.Core
batchToSpaceTensorFlow.GenOps.Core
batchToSpace'TensorFlow.GenOps.Core
batchToSpaceNDTensorFlow.GenOps.Core
batchToSpaceND'TensorFlow.GenOps.Core
betaincTensorFlow.GenOps.Core
betainc'TensorFlow.GenOps.Core
biasAddTensorFlow.GenOps.Core
biasAdd'TensorFlow.GenOps.Core
biasAddGradTensorFlow.GenOps.Core
biasAddGrad'TensorFlow.GenOps.Core
biasAddV1TensorFlow.GenOps.Core
biasAddV1'TensorFlow.GenOps.Core
bitcastTensorFlow.GenOps.Core
bitcast'TensorFlow.GenOps.Core
broadcastArgsTensorFlow.GenOps.Core
broadcastArgs'TensorFlow.GenOps.Core
broadcastGradientArgsTensorFlow.GenOps.Core
broadcastGradientArgs'TensorFlow.GenOps.Core
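A note on the pattern in these index hunks: for every generated op this revision adds a primed companion (barrier', batchMatMul', and so on). In the tensorflow-haskell bindings a primed wrapper takes one extra leading argument, a function that can modify the underlying OpDef (for example to set a custom op name) before the node is added to the graph. A minimal sketch, assuming that convention; the opName lens and the tensor x here are illustrative, not taken from this diff:

> import Lens.Family2 ((.~))
> import qualified TensorFlow.GenOps.Core as Ops
>
> renamed = Ops.relu' (opName .~ "my_relu") x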
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html
index 56c8b62..9b087ae 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-C.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - C)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - C

castTensorFlow.GenOps.Core
cast'TensorFlow.GenOps.Core
ceilTensorFlow.GenOps.Core
ceil'TensorFlow.GenOps.Core
checkNumericsTensorFlow.GenOps.Core
checkNumerics'TensorFlow.GenOps.Core
choleskyTensorFlow.GenOps.Core
cholesky'TensorFlow.GenOps.Core
choleskyGradTensorFlow.GenOps.Core
choleskyGrad'TensorFlow.GenOps.Core
complexTensorFlow.GenOps.Core
complex'TensorFlow.GenOps.Core
complexAbsTensorFlow.GenOps.Core
complexAbs'TensorFlow.GenOps.Core
computeAccidentalHitsTensorFlow.GenOps.Core
computeAccidentalHits'TensorFlow.GenOps.Core
concatTensorFlow.GenOps.Core
concat'TensorFlow.GenOps.Core
concatOffsetTensorFlow.GenOps.Core
concatOffset'TensorFlow.GenOps.Core
concatV2TensorFlow.GenOps.Core
concatV2'TensorFlow.GenOps.Core
conditionalAccumulatorTensorFlow.GenOps.Core
conditionalAccumulator'TensorFlow.GenOps.Core
conjTensorFlow.GenOps.Core
conj'TensorFlow.GenOps.Core
constTensorFlow.GenOps.Core
const'TensorFlow.GenOps.Core
controlTriggerTensorFlow.GenOps.Core
controlTrigger'TensorFlow.GenOps.Core
conv2DTensorFlow.GenOps.Core
conv2D'TensorFlow.GenOps.Core
conv2DBackpropFilterTensorFlow.GenOps.Core
conv2DBackpropFilter'TensorFlow.GenOps.Core
conv2DBackpropInputTensorFlow.GenOps.Core
conv2DBackpropInput'TensorFlow.GenOps.Core
conv3DTensorFlow.GenOps.Core
conv3D'TensorFlow.GenOps.Core
conv3DBackpropFilterTensorFlow.GenOps.Core
conv3DBackpropFilter'TensorFlow.GenOps.Core
conv3DBackpropFilterV2TensorFlow.GenOps.Core
conv3DBackpropFilterV2'TensorFlow.GenOps.Core
conv3DBackpropInputTensorFlow.GenOps.Core
conv3DBackpropInput'TensorFlow.GenOps.Core
conv3DBackpropInputV2TensorFlow.GenOps.Core
conv3DBackpropInputV2'TensorFlow.GenOps.Core
copyTensorFlow.GenOps.Core
copy'TensorFlow.GenOps.Core
copyHostTensorFlow.GenOps.Core
copyHost'TensorFlow.GenOps.Core
cosTensorFlow.GenOps.Core
cos'TensorFlow.GenOps.Core
countUpToTensorFlow.GenOps.Core
countUpTo'TensorFlow.GenOps.Core
cropAndResizeTensorFlow.GenOps.Core
cropAndResize'TensorFlow.GenOps.Core
cropAndResizeGradBoxesTensorFlow.GenOps.Core
cropAndResizeGradBoxes'TensorFlow.GenOps.Core
cropAndResizeGradImageTensorFlow.GenOps.Core
cropAndResizeGradImage'TensorFlow.GenOps.Core
crossTensorFlow.GenOps.Core
cross'TensorFlow.GenOps.Core
cTCBeamSearchDecoderTensorFlow.GenOps.Core
cTCBeamSearchDecoder'TensorFlow.GenOps.Core
cTCGreedyDecoderTensorFlow.GenOps.Core
cTCGreedyDecoder'TensorFlow.GenOps.Core
cTCLossTensorFlow.GenOps.Core
cTCLoss'TensorFlow.GenOps.Core
cumprodTensorFlow.GenOps.Core
cumprod'TensorFlow.GenOps.Core
cumsumTensorFlow.GenOps.Core
cumsum'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html
index 5b3e374..73a756e 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-D.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - D)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - D

debugIdentityTensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
decodeBase64TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
deleteSessionTensorTensorFlow.GenOps.Core
depthToSpaceTensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
deserializeManySparseTensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
diagTensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
divTensorFlow.GenOps.Core
drawBoundingBoxesTensorFlow.GenOps.Core
dynamicPartitionTensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - D

debugIdentityTensorFlow.GenOps.Core
debugIdentity'TensorFlow.GenOps.Core
debugNanCountTensorFlow.GenOps.Core
debugNanCount'TensorFlow.GenOps.Core
debugNumericSummaryTensorFlow.GenOps.Core
debugNumericSummary'TensorFlow.GenOps.Core
decodeBase64TensorFlow.GenOps.Core
decodeBase64'TensorFlow.GenOps.Core
decodeCSVTensorFlow.GenOps.Core
decodeCSV'TensorFlow.GenOps.Core
decodeGifTensorFlow.GenOps.Core
decodeGif'TensorFlow.GenOps.Core
decodeJpegTensorFlow.GenOps.Core
decodeJpeg'TensorFlow.GenOps.Core
decodeJSONExampleTensorFlow.GenOps.Core
decodeJSONExample'TensorFlow.GenOps.Core
decodePngTensorFlow.GenOps.Core
decodePng'TensorFlow.GenOps.Core
decodeRawTensorFlow.GenOps.Core
decodeRaw'TensorFlow.GenOps.Core
deleteSessionTensorTensorFlow.GenOps.Core
deleteSessionTensor'TensorFlow.GenOps.Core
denseToDenseSetOperationTensorFlow.GenOps.Core
denseToDenseSetOperation'TensorFlow.GenOps.Core
denseToSparseSetOperationTensorFlow.GenOps.Core
denseToSparseSetOperation'TensorFlow.GenOps.Core
depthToSpaceTensorFlow.GenOps.Core
depthToSpace'TensorFlow.GenOps.Core
depthwiseConv2dNativeTensorFlow.GenOps.Core
depthwiseConv2dNative'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilterTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropFilter'TensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInputTensorFlow.GenOps.Core
depthwiseConv2dNativeBackpropInput'TensorFlow.GenOps.Core
dequantizeTensorFlow.GenOps.Core
dequantize'TensorFlow.GenOps.Core
deserializeManySparseTensorFlow.GenOps.Core
deserializeManySparse'TensorFlow.GenOps.Core
destroyTemporaryVariableTensorFlow.GenOps.Core
destroyTemporaryVariable'TensorFlow.GenOps.Core
diagTensorFlow.GenOps.Core
diag'TensorFlow.GenOps.Core
diagPartTensorFlow.GenOps.Core
diagPart'TensorFlow.GenOps.Core
digammaTensorFlow.GenOps.Core
digamma'TensorFlow.GenOps.Core
dilation2DTensorFlow.GenOps.Core
dilation2D'TensorFlow.GenOps.Core
dilation2DBackpropFilterTensorFlow.GenOps.Core
dilation2DBackpropFilter'TensorFlow.GenOps.Core
dilation2DBackpropInputTensorFlow.GenOps.Core
dilation2DBackpropInput'TensorFlow.GenOps.Core
divTensorFlow.GenOps.Core
div'TensorFlow.GenOps.Core
drawBoundingBoxesTensorFlow.GenOps.Core
drawBoundingBoxes'TensorFlow.GenOps.Core
dynamicPartitionTensorFlow.GenOps.Core
dynamicPartition'TensorFlow.GenOps.Core
dynamicStitchTensorFlow.GenOps.Core
dynamicStitch'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html
index 1ae3874..a3f764f 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-E.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - E)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html
index 63972a7..e3c8490 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-F.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - F)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - F

factTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fFTTensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fillTensorFlow.GenOps.Core
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
floorTensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - F

factTensorFlow.GenOps.Core
fact'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgs'TensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxArgsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsTensorFlow.GenOps.Core
fakeQuantWithMinMaxVars'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsGradient'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannel'TensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradientTensorFlow.GenOps.Core
fakeQuantWithMinMaxVarsPerChannelGradient'TensorFlow.GenOps.Core
fakeQueueTensorFlow.GenOps.Core
fakeQueue'TensorFlow.GenOps.Core
fFTTensorFlow.GenOps.Core
fFT'TensorFlow.GenOps.Core
fFT2DTensorFlow.GenOps.Core
fFT2D'TensorFlow.GenOps.Core
fFT3DTensorFlow.GenOps.Core
fFT3D'TensorFlow.GenOps.Core
fIFOQueueTensorFlow.GenOps.Core
fIFOQueue'TensorFlow.GenOps.Core
fIFOQueueV2TensorFlow.GenOps.Core
fIFOQueueV2'TensorFlow.GenOps.Core
fillTensorFlow.GenOps.Core
fill'TensorFlow.GenOps.Core
fixedLengthRecordReaderTensorFlow.GenOps.Core
fixedLengthRecordReader'TensorFlow.GenOps.Core
fixedLengthRecordReaderV2TensorFlow.GenOps.Core
fixedLengthRecordReaderV2'TensorFlow.GenOps.Core
fixedUnigramCandidateSamplerTensorFlow.GenOps.Core
fixedUnigramCandidateSampler'TensorFlow.GenOps.Core
floorTensorFlow.GenOps.Core
floor'TensorFlow.GenOps.Core
floorDivTensorFlow.GenOps.Core
floorDiv'TensorFlow.GenOps.Core
floorModTensorFlow.GenOps.Core
floorMod'TensorFlow.GenOps.Core
fractionalAvgPoolTensorFlow.GenOps.Core
fractionalAvgPool'TensorFlow.GenOps.Core
fractionalAvgPoolGradTensorFlow.GenOps.Core
fractionalAvgPoolGrad'TensorFlow.GenOps.Core
fractionalMaxPoolTensorFlow.GenOps.Core
fractionalMaxPool'TensorFlow.GenOps.Core
fractionalMaxPoolGradTensorFlow.GenOps.Core
fractionalMaxPoolGrad'TensorFlow.GenOps.Core
fusedBatchNormTensorFlow.GenOps.Core
fusedBatchNorm'TensorFlow.GenOps.Core
fusedBatchNormGradTensorFlow.GenOps.Core
fusedBatchNormGrad'TensorFlow.GenOps.Core
fusedPadConv2DTensorFlow.GenOps.Core
fusedPadConv2D'TensorFlow.GenOps.Core
fusedResizeAndPadConv2DTensorFlow.GenOps.Core
fusedResizeAndPadConv2D'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html
index 3623797..5620d20 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-G.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - G)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html
index 12f960e..f331a82 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-H.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - H)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html
index 5076cc3..789d9a2 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-I.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - I)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html
index ae01d94..5497639 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-L.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - L)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html
index e49eda1..b97b8c6 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-M.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - M)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - M

matchingFilesTensorFlow.GenOps.Core
matchingFiles'TensorFlow.GenOps.Core
matMulTensorFlow.GenOps.Core
matMul'TensorFlow.GenOps.Core
matrixBandPartTensorFlow.GenOps.Core
matrixBandPart'TensorFlow.GenOps.Core
matrixDeterminantTensorFlow.GenOps.Core
matrixDeterminant'TensorFlow.GenOps.Core
matrixDiagTensorFlow.GenOps.Core
matrixDiag'TensorFlow.GenOps.Core
matrixDiagPartTensorFlow.GenOps.Core
matrixDiagPart'TensorFlow.GenOps.Core
matrixInverseTensorFlow.GenOps.Core
matrixInverse'TensorFlow.GenOps.Core
matrixSetDiagTensorFlow.GenOps.Core
matrixSetDiag'TensorFlow.GenOps.Core
matrixSolveTensorFlow.GenOps.Core
matrixSolve'TensorFlow.GenOps.Core
matrixSolveLsTensorFlow.GenOps.Core
matrixSolveLs'TensorFlow.GenOps.Core
matrixTriangularSolveTensorFlow.GenOps.Core
matrixTriangularSolve'TensorFlow.GenOps.Core
maxTensorFlow.GenOps.Core
max'TensorFlow.GenOps.Core
maximumTensorFlow.GenOps.Core
maximum'TensorFlow.GenOps.Core
maxPoolTensorFlow.GenOps.Core
maxPool'TensorFlow.GenOps.Core
maxPool3DTensorFlow.GenOps.Core
maxPool3D'TensorFlow.GenOps.Core
maxPool3DGradTensorFlow.GenOps.Core
maxPool3DGrad'TensorFlow.GenOps.Core
maxPoolGradTensorFlow.GenOps.Core
maxPoolGrad'TensorFlow.GenOps.Core
maxPoolGradWithArgmaxTensorFlow.GenOps.Core
maxPoolGradWithArgmax'TensorFlow.GenOps.Core
maxPoolWithArgmaxTensorFlow.GenOps.Core
maxPoolWithArgmax'TensorFlow.GenOps.Core
meanTensorFlow.GenOps.Core
mean'TensorFlow.GenOps.Core
mergeTensorFlow.GenOps.Core
merge'TensorFlow.GenOps.Core
mergeSummaryTensorFlow.GenOps.Core
mergeSummary'TensorFlow.GenOps.Core
mergeV2CheckpointsTensorFlow.GenOps.Core
mergeV2Checkpoints'TensorFlow.GenOps.Core
minTensorFlow.GenOps.Core
min'TensorFlow.GenOps.Core
minimumTensorFlow.GenOps.Core
minimum'TensorFlow.GenOps.Core
mirrorPadTensorFlow.GenOps.Core
mirrorPad'TensorFlow.GenOps.Core
mirrorPadGradTensorFlow.GenOps.Core
mirrorPadGrad'TensorFlow.GenOps.Core
modTensorFlow.GenOps.Core
mod'TensorFlow.GenOps.Core
mulTensorFlow.GenOps.Core
mul'TensorFlow.GenOps.Core
multinomialTensorFlow.GenOps.Core
multinomial'TensorFlow.GenOps.Core
mutableDenseHashTableTensorFlow.GenOps.Core
mutableDenseHashTable'TensorFlow.GenOps.Core
mutableHashTableTensorFlow.GenOps.Core
mutableHashTable'TensorFlow.GenOps.Core
mutableHashTableOfTensorsTensorFlow.GenOps.Core
mutableHashTableOfTensors'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html
index c2e42d8..c5012ff 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-N.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - N)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html
index 1f3b4cc..0ef5caa 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-O.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - O)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html
index 05affdf..0acc213 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-P.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - P)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html
index c95d8d1..0fa93cf 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Q.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - Q)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - Q

quantizeAndDequantizeTensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
queueCloseTensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - Q

qrTensorFlow.GenOps.Core
qr'TensorFlow.GenOps.Core
quantizeAndDequantizeTensorFlow.GenOps.Core
quantizeAndDequantize'TensorFlow.GenOps.Core
quantizedAvgPoolTensorFlow.GenOps.Core
quantizedAvgPool'TensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalizationTensorFlow.GenOps.Core
quantizedBatchNormWithGlobalNormalization'TensorFlow.GenOps.Core
quantizedBiasAddTensorFlow.GenOps.Core
quantizedBiasAdd'TensorFlow.GenOps.Core
quantizedConcatTensorFlow.GenOps.Core
quantizedConcat'TensorFlow.GenOps.Core
quantizedConv2DTensorFlow.GenOps.Core
quantizedConv2D'TensorFlow.GenOps.Core
quantizedInstanceNormTensorFlow.GenOps.Core
quantizedInstanceNorm'TensorFlow.GenOps.Core
quantizedMatMulTensorFlow.GenOps.Core
quantizedMatMul'TensorFlow.GenOps.Core
quantizedMaxPoolTensorFlow.GenOps.Core
quantizedMaxPool'TensorFlow.GenOps.Core
quantizeDownAndShrinkRangeTensorFlow.GenOps.Core
quantizeDownAndShrinkRange'TensorFlow.GenOps.Core
quantizedReluTensorFlow.GenOps.Core
quantizedRelu'TensorFlow.GenOps.Core
quantizedRelu6TensorFlow.GenOps.Core
quantizedRelu6'TensorFlow.GenOps.Core
quantizedReluXTensorFlow.GenOps.Core
quantizedReluX'TensorFlow.GenOps.Core
quantizedReshapeTensorFlow.GenOps.Core
quantizedReshape'TensorFlow.GenOps.Core
quantizeV2TensorFlow.GenOps.Core
quantizeV2'TensorFlow.GenOps.Core
queueCloseTensorFlow.GenOps.Core
queueClose'TensorFlow.GenOps.Core
queueCloseV2TensorFlow.GenOps.Core
queueCloseV2'TensorFlow.GenOps.Core
queueDequeueTensorFlow.GenOps.Core
queueDequeue'TensorFlow.GenOps.Core
queueDequeueManyTensorFlow.GenOps.Core
queueDequeueMany'TensorFlow.GenOps.Core
queueDequeueManyV2TensorFlow.GenOps.Core
queueDequeueManyV2'TensorFlow.GenOps.Core
queueDequeueUpToTensorFlow.GenOps.Core
queueDequeueUpTo'TensorFlow.GenOps.Core
queueDequeueUpToV2TensorFlow.GenOps.Core
queueDequeueUpToV2'TensorFlow.GenOps.Core
queueDequeueV2TensorFlow.GenOps.Core
queueDequeueV2'TensorFlow.GenOps.Core
queueEnqueueTensorFlow.GenOps.Core
queueEnqueue'TensorFlow.GenOps.Core
queueEnqueueManyTensorFlow.GenOps.Core
queueEnqueueMany'TensorFlow.GenOps.Core
queueEnqueueManyV2TensorFlow.GenOps.Core
queueEnqueueManyV2'TensorFlow.GenOps.Core
queueEnqueueV2TensorFlow.GenOps.Core
queueEnqueueV2'TensorFlow.GenOps.Core
queueSizeTensorFlow.GenOps.Core
queueSize'TensorFlow.GenOps.Core
queueSizeV2TensorFlow.GenOps.Core
queueSizeV2'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html
index ebc855b..b3c90e5 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-R.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - R)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - R

randomCropTensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
rangeTensorFlow.GenOps.Core
rankTensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readVariableOpTensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reduceJoinTensorFlow.GenOps.Core
refEnterTensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
reluTensorFlow.GenOps.Core
relu6TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
reluGradTensorFlow.GenOps.Core
requantizationRangeTensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
reshapeTensorFlow.GenOps.Core
resizeAreaTensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
resourceScatterAddTensorFlow.GenOps.Core
restoreTensorFlow.GenOps.Core
restoreSliceTensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
rsqrtTensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - R

randomCropTensorFlow.GenOps.Core
randomCrop'TensorFlow.GenOps.Core
randomGammaTensorFlow.GenOps.Core
randomGamma'TensorFlow.GenOps.Core
randomShuffleTensorFlow.GenOps.Core
randomShuffle'TensorFlow.GenOps.Core
randomShuffleQueueTensorFlow.GenOps.Core
randomShuffleQueue'TensorFlow.GenOps.Core
randomShuffleQueueV2TensorFlow.GenOps.Core
randomShuffleQueueV2'TensorFlow.GenOps.Core
randomStandardNormalTensorFlow.GenOps.Core
randomStandardNormal'TensorFlow.GenOps.Core
randomUniformTensorFlow.GenOps.Core
randomUniform'TensorFlow.GenOps.Core
randomUniformIntTensorFlow.GenOps.Core
randomUniformInt'TensorFlow.GenOps.Core
rangeTensorFlow.GenOps.Core
range'TensorFlow.GenOps.Core
rankTensorFlow.GenOps.Core
rank'TensorFlow.GenOps.Core
readerNumRecordsProducedTensorFlow.GenOps.Core
readerNumRecordsProduced'TensorFlow.GenOps.Core
readerNumRecordsProducedV2TensorFlow.GenOps.Core
readerNumRecordsProducedV2'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedTensorFlow.GenOps.Core
readerNumWorkUnitsCompleted'TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2TensorFlow.GenOps.Core
readerNumWorkUnitsCompletedV2'TensorFlow.GenOps.Core
readerReadTensorFlow.GenOps.Core
readerRead'TensorFlow.GenOps.Core
readerReadUpToTensorFlow.GenOps.Core
readerReadUpTo'TensorFlow.GenOps.Core
readerReadUpToV2TensorFlow.GenOps.Core
readerReadUpToV2'TensorFlow.GenOps.Core
readerReadV2TensorFlow.GenOps.Core
readerReadV2'TensorFlow.GenOps.Core
readerResetTensorFlow.GenOps.Core
readerReset'TensorFlow.GenOps.Core
readerResetV2TensorFlow.GenOps.Core
readerResetV2'TensorFlow.GenOps.Core
readerRestoreStateTensorFlow.GenOps.Core
readerRestoreState'TensorFlow.GenOps.Core
readerRestoreStateV2TensorFlow.GenOps.Core
readerRestoreStateV2'TensorFlow.GenOps.Core
readerSerializeStateTensorFlow.GenOps.Core
readerSerializeState'TensorFlow.GenOps.Core
readerSerializeStateV2TensorFlow.GenOps.Core
readerSerializeStateV2'TensorFlow.GenOps.Core
readFileTensorFlow.GenOps.Core
readFile'TensorFlow.GenOps.Core
readVariableOpTensorFlow.GenOps.Core
readVariableOp'TensorFlow.GenOps.Core
realTensorFlow.GenOps.Core
real'TensorFlow.GenOps.Core
realDivTensorFlow.GenOps.Core
realDiv'TensorFlow.GenOps.Core
reciprocalTensorFlow.GenOps.Core
reciprocal'TensorFlow.GenOps.Core
reciprocalGradTensorFlow.GenOps.Core
reciprocalGrad'TensorFlow.GenOps.Core
recordInputTensorFlow.GenOps.Core
recordInput'TensorFlow.GenOps.Core
reduceJoinTensorFlow.GenOps.Core
reduceJoin'TensorFlow.GenOps.Core
refEnterTensorFlow.GenOps.Core
refEnter'TensorFlow.GenOps.Core
refExitTensorFlow.GenOps.Core
refExit'TensorFlow.GenOps.Core
refIdentityTensorFlow.GenOps.Core
refIdentity'TensorFlow.GenOps.Core
refMergeTensorFlow.GenOps.Core
refMerge'TensorFlow.GenOps.Core
refNextIterationTensorFlow.GenOps.Core
refNextIteration'TensorFlow.GenOps.Core
refSelectTensorFlow.GenOps.Core
refSelect'TensorFlow.GenOps.Core
refSwitchTensorFlow.GenOps.Core
refSwitch'TensorFlow.GenOps.Core
reluTensorFlow.GenOps.Core
relu'TensorFlow.GenOps.Core
relu6TensorFlow.GenOps.Core
relu6'TensorFlow.GenOps.Core
relu6GradTensorFlow.GenOps.Core
relu6Grad'TensorFlow.GenOps.Core
reluGradTensorFlow.GenOps.Core
reluGrad'TensorFlow.GenOps.Core
requantizationRangeTensorFlow.GenOps.Core
requantizationRange'TensorFlow.GenOps.Core
requantizeTensorFlow.GenOps.Core
requantize'TensorFlow.GenOps.Core
reshapeTensorFlow.GenOps.Core
reshape'TensorFlow.GenOps.Core
resizeAreaTensorFlow.GenOps.Core
resizeArea'TensorFlow.GenOps.Core
resizeBicubicTensorFlow.GenOps.Core
resizeBicubic'TensorFlow.GenOps.Core
resizeBilinearTensorFlow.GenOps.Core
resizeBilinear'TensorFlow.GenOps.Core
resizeBilinearGradTensorFlow.GenOps.Core
resizeBilinearGrad'TensorFlow.GenOps.Core
resizeNearestNeighborTensorFlow.GenOps.Core
resizeNearestNeighbor'TensorFlow.GenOps.Core
resizeNearestNeighborGradTensorFlow.GenOps.Core
resizeNearestNeighborGrad'TensorFlow.GenOps.Core
resourceApplyAdadeltaTensorFlow.GenOps.Core
resourceApplyAdadelta'TensorFlow.GenOps.Core
resourceApplyAdagradTensorFlow.GenOps.Core
resourceApplyAdagrad'TensorFlow.GenOps.Core
resourceApplyAdagradDATensorFlow.GenOps.Core
resourceApplyAdagradDA'TensorFlow.GenOps.Core
resourceApplyAdamTensorFlow.GenOps.Core
resourceApplyAdam'TensorFlow.GenOps.Core
resourceApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceApplyFtrlTensorFlow.GenOps.Core
resourceApplyFtrl'TensorFlow.GenOps.Core
resourceApplyGradientDescentTensorFlow.GenOps.Core
resourceApplyGradientDescent'TensorFlow.GenOps.Core
resourceApplyMomentumTensorFlow.GenOps.Core
resourceApplyMomentum'TensorFlow.GenOps.Core
resourceApplyProximalAdagradTensorFlow.GenOps.Core
resourceApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceApplyRMSPropTensorFlow.GenOps.Core
resourceApplyRMSProp'TensorFlow.GenOps.Core
resourceGatherTensorFlow.GenOps.Core
resourceGather'TensorFlow.GenOps.Core
resourceScatterAddTensorFlow.GenOps.Core
resourceScatterAdd'TensorFlow.GenOps.Core
resourceSparseApplyAdadeltaTensorFlow.GenOps.Core
resourceSparseApplyAdadelta'TensorFlow.GenOps.Core
resourceSparseApplyAdagradTensorFlow.GenOps.Core
resourceSparseApplyAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyAdagradDATensorFlow.GenOps.Core
resourceSparseApplyAdagradDA'TensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSPropTensorFlow.GenOps.Core
resourceSparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
resourceSparseApplyFtrlTensorFlow.GenOps.Core
resourceSparseApplyFtrl'TensorFlow.GenOps.Core
resourceSparseApplyMomentumTensorFlow.GenOps.Core
resourceSparseApplyMomentum'TensorFlow.GenOps.Core
resourceSparseApplyProximalAdagradTensorFlow.GenOps.Core
resourceSparseApplyProximalAdagrad'TensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescentTensorFlow.GenOps.Core
resourceSparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
resourceSparseApplyRMSPropTensorFlow.GenOps.Core
resourceSparseApplyRMSProp'TensorFlow.GenOps.Core
restoreTensorFlow.GenOps.Core
restore'TensorFlow.GenOps.Core
restoreSliceTensorFlow.GenOps.Core
restoreSlice'TensorFlow.GenOps.Core
restoreV2TensorFlow.GenOps.Core
restoreV2'TensorFlow.GenOps.Core
reverseTensorFlow.GenOps.Core
reverse'TensorFlow.GenOps.Core
reverseSequenceTensorFlow.GenOps.Core
reverseSequence'TensorFlow.GenOps.Core
reverseV2TensorFlow.GenOps.Core
reverseV2'TensorFlow.GenOps.Core
rGBToHSVTensorFlow.GenOps.Core
rGBToHSV'TensorFlow.GenOps.Core
rintTensorFlow.GenOps.Core
rint'TensorFlow.GenOps.Core
roundTensorFlow.GenOps.Core
round'TensorFlow.GenOps.Core
rsqrtTensorFlow.GenOps.Core
rsqrt'TensorFlow.GenOps.Core
rsqrtGradTensorFlow.GenOps.Core
rsqrtGrad'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html
index add4b29..7771021 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-S.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - S)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - S

sampleDistortedBoundingBoxTensorFlow.GenOps.Core
scalarSummaryTensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
sdcaFprintTensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
shapeTensorFlow.GenOps.Core
shapeNTensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoidGradTensorFlow.GenOps.Core
signTensorFlow.GenOps.Core
sinTensorFlow.GenOps.Core
sizeTensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
softmaxTensorFlow.GenOps.Core
softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
softplusTensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
spaceToBatchTensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseToDenseTensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stopGradientTensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
subTensorFlow.GenOps.Core
substrTensorFlow.GenOps.Core
sumTensorFlow.GenOps.Core
svdTensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - S

sampleDistortedBoundingBoxTensorFlow.GenOps.Core
sampleDistortedBoundingBox'TensorFlow.GenOps.Core
saveTensorFlow.GenOps.Core
save'TensorFlow.GenOps.Core
saveSlicesTensorFlow.GenOps.Core
saveSlices'TensorFlow.GenOps.Core
saveV2TensorFlow.GenOps.Core
saveV2'TensorFlow.GenOps.Core
scalarSummaryTensorFlow.GenOps.Core
scalarSummary'TensorFlow.GenOps.Core
scatterAddTensorFlow.GenOps.Core
scatterAdd'TensorFlow.GenOps.Core
scatterDivTensorFlow.GenOps.Core
scatterDiv'TensorFlow.GenOps.Core
scatterMulTensorFlow.GenOps.Core
scatterMul'TensorFlow.GenOps.Core
scatterNdTensorFlow.GenOps.Core
scatterNd'TensorFlow.GenOps.Core
scatterNdAddTensorFlow.GenOps.Core
scatterNdAdd'TensorFlow.GenOps.Core
scatterNdSubTensorFlow.GenOps.Core
scatterNdSub'TensorFlow.GenOps.Core
scatterNdUpdateTensorFlow.GenOps.Core
scatterNdUpdate'TensorFlow.GenOps.Core
scatterSubTensorFlow.GenOps.Core
scatterSub'TensorFlow.GenOps.Core
scatterUpdateTensorFlow.GenOps.Core
scatterUpdate'TensorFlow.GenOps.Core
sdcaFprintTensorFlow.GenOps.Core
sdcaFprint'TensorFlow.GenOps.Core
sdcaOptimizerTensorFlow.GenOps.Core
sdcaOptimizer'TensorFlow.GenOps.Core
sdcaShrinkL1TensorFlow.GenOps.Core
sdcaShrinkL1'TensorFlow.GenOps.Core
segmentMaxTensorFlow.GenOps.Core
segmentMax'TensorFlow.GenOps.Core
segmentMeanTensorFlow.GenOps.Core
segmentMean'TensorFlow.GenOps.Core
segmentMinTensorFlow.GenOps.Core
segmentMin'TensorFlow.GenOps.Core
segmentProdTensorFlow.GenOps.Core
segmentProd'TensorFlow.GenOps.Core
segmentSumTensorFlow.GenOps.Core
segmentSum'TensorFlow.GenOps.Core
selectTensorFlow.GenOps.Core
select'TensorFlow.GenOps.Core
selfAdjointEigTensorFlow.GenOps.Core
selfAdjointEig'TensorFlow.GenOps.Core
selfAdjointEigV2TensorFlow.GenOps.Core
selfAdjointEigV2'TensorFlow.GenOps.Core
serializeManySparseTensorFlow.GenOps.Core
serializeManySparse'TensorFlow.GenOps.Core
serializeSparseTensorFlow.GenOps.Core
serializeSparse'TensorFlow.GenOps.Core
setSizeTensorFlow.GenOps.Core
setSize'TensorFlow.GenOps.Core
shapeTensorFlow.GenOps.Core
shape'TensorFlow.GenOps.Core
shapeNTensorFlow.GenOps.Core
shapeN'TensorFlow.GenOps.Core
shardedFilenameTensorFlow.GenOps.Core
shardedFilename'TensorFlow.GenOps.Core
shardedFilespecTensorFlow.GenOps.Core
shardedFilespec'TensorFlow.GenOps.Core
sigmoidTensorFlow.GenOps.Core
sigmoid'TensorFlow.GenOps.Core
sigmoidGradTensorFlow.GenOps.Core
sigmoidGrad'TensorFlow.GenOps.Core
signTensorFlow.GenOps.Core
sign'TensorFlow.GenOps.Core
sinTensorFlow.GenOps.Core
sin'TensorFlow.GenOps.Core
sizeTensorFlow.GenOps.Core
size'TensorFlow.GenOps.Core
skipgramTensorFlow.GenOps.Core
skipgram'TensorFlow.GenOps.Core
sliceTensorFlow.GenOps.Core
slice'TensorFlow.GenOps.Core
softmaxTensorFlow.GenOps.Core
softmax'TensorFlow.GenOps.Core
softmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
softmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
softplusTensorFlow.GenOps.Core
softplus'TensorFlow.GenOps.Core
softplusGradTensorFlow.GenOps.Core
softplusGrad'TensorFlow.GenOps.Core
softsignTensorFlow.GenOps.Core
softsign'TensorFlow.GenOps.Core
softsignGradTensorFlow.GenOps.Core
softsignGrad'TensorFlow.GenOps.Core
spaceToBatchTensorFlow.GenOps.Core
spaceToBatch'TensorFlow.GenOps.Core
spaceToBatchNDTensorFlow.GenOps.Core
spaceToBatchND'TensorFlow.GenOps.Core
spaceToDepthTensorFlow.GenOps.Core
spaceToDepth'TensorFlow.GenOps.Core
sparseAccumulatorApplyGradientTensorFlow.GenOps.Core
sparseAccumulatorApplyGradient'TensorFlow.GenOps.Core
sparseAccumulatorTakeGradientTensorFlow.GenOps.Core
sparseAccumulatorTakeGradient'TensorFlow.GenOps.Core
sparseAddTensorFlow.GenOps.Core
sparseAdd'TensorFlow.GenOps.Core
sparseAddGradTensorFlow.GenOps.Core
sparseAddGrad'TensorFlow.GenOps.Core
sparseApplyAdadeltaTensorFlow.GenOps.Core
sparseApplyAdadelta'TensorFlow.GenOps.Core
sparseApplyAdagradTensorFlow.GenOps.Core
sparseApplyAdagrad'TensorFlow.GenOps.Core
sparseApplyAdagradDATensorFlow.GenOps.Core
sparseApplyAdagradDA'TensorFlow.GenOps.Core
sparseApplyCenteredRMSPropTensorFlow.GenOps.Core
sparseApplyCenteredRMSProp'TensorFlow.GenOps.Core
sparseApplyFtrlTensorFlow.GenOps.Core
sparseApplyFtrl'TensorFlow.GenOps.Core
sparseApplyMomentumTensorFlow.GenOps.Core
sparseApplyMomentum'TensorFlow.GenOps.Core
sparseApplyProximalAdagradTensorFlow.GenOps.Core
sparseApplyProximalAdagrad'TensorFlow.GenOps.Core
sparseApplyProximalGradientDescentTensorFlow.GenOps.Core
sparseApplyProximalGradientDescent'TensorFlow.GenOps.Core
sparseApplyRMSPropTensorFlow.GenOps.Core
sparseApplyRMSProp'TensorFlow.GenOps.Core
sparseConcatTensorFlow.GenOps.Core
sparseConcat'TensorFlow.GenOps.Core
sparseConditionalAccumulatorTensorFlow.GenOps.Core
sparseConditionalAccumulator'TensorFlow.GenOps.Core
sparseDenseCwiseAddTensorFlow.GenOps.Core
sparseDenseCwiseAdd'TensorFlow.GenOps.Core
sparseDenseCwiseDivTensorFlow.GenOps.Core
sparseDenseCwiseDiv'TensorFlow.GenOps.Core
sparseDenseCwiseMulTensorFlow.GenOps.Core
sparseDenseCwiseMul'TensorFlow.GenOps.Core
sparseMatMulTensorFlow.GenOps.Core
sparseMatMul'TensorFlow.GenOps.Core
sparseReduceSumTensorFlow.GenOps.Core
sparseReduceSum'TensorFlow.GenOps.Core
sparseReduceSumSparseTensorFlow.GenOps.Core
sparseReduceSumSparse'TensorFlow.GenOps.Core
sparseReorderTensorFlow.GenOps.Core
sparseReorder'TensorFlow.GenOps.Core
sparseReshapeTensorFlow.GenOps.Core
sparseReshape'TensorFlow.GenOps.Core
sparseSegmentMeanTensorFlow.GenOps.Core
sparseSegmentMean'TensorFlow.GenOps.Core
sparseSegmentMeanGradTensorFlow.GenOps.Core
sparseSegmentMeanGrad'TensorFlow.GenOps.Core
sparseSegmentSqrtNTensorFlow.GenOps.Core
sparseSegmentSqrtN'TensorFlow.GenOps.Core
sparseSegmentSqrtNGradTensorFlow.GenOps.Core
sparseSegmentSqrtNGrad'TensorFlow.GenOps.Core
sparseSegmentSumTensorFlow.GenOps.Core
sparseSegmentSum'TensorFlow.GenOps.Core
sparseSoftmaxTensorFlow.GenOps.Core
sparseSoftmax'TensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogitsTensorFlow.GenOps.Core
sparseSoftmaxCrossEntropyWithLogits'TensorFlow.GenOps.Core
sparseSparseMaximumTensorFlow.GenOps.Core
sparseSparseMaximum'TensorFlow.GenOps.Core
sparseSparseMinimumTensorFlow.GenOps.Core
sparseSparseMinimum'TensorFlow.GenOps.Core
sparseSplitTensorFlow.GenOps.Core
sparseSplit'TensorFlow.GenOps.Core
sparseTensorDenseAddTensorFlow.GenOps.Core
sparseTensorDenseAdd'TensorFlow.GenOps.Core
sparseTensorDenseMatMulTensorFlow.GenOps.Core
sparseTensorDenseMatMul'TensorFlow.GenOps.Core
sparseToDenseTensorFlow.GenOps.Core
sparseToDense'TensorFlow.GenOps.Core
sparseToSparseSetOperationTensorFlow.GenOps.Core
sparseToSparseSetOperation'TensorFlow.GenOps.Core
splitTensorFlow.GenOps.Core
split'TensorFlow.GenOps.Core
splitVTensorFlow.GenOps.Core
splitV'TensorFlow.GenOps.Core
sqrtTensorFlow.GenOps.Core
sqrt'TensorFlow.GenOps.Core
sqrtGradTensorFlow.GenOps.Core
sqrtGrad'TensorFlow.GenOps.Core
squareTensorFlow.GenOps.Core
square'TensorFlow.GenOps.Core
squaredDifferenceTensorFlow.GenOps.Core
squaredDifference'TensorFlow.GenOps.Core
squeezeTensorFlow.GenOps.Core
squeeze'TensorFlow.GenOps.Core
stackTensorFlow.GenOps.Core
stack'TensorFlow.GenOps.Core
stackCloseTensorFlow.GenOps.Core
stackClose'TensorFlow.GenOps.Core
stackPopTensorFlow.GenOps.Core
stackPop'TensorFlow.GenOps.Core
stackPushTensorFlow.GenOps.Core
stackPush'TensorFlow.GenOps.Core
stageTensorFlow.GenOps.Core
stage'TensorFlow.GenOps.Core
stopGradientTensorFlow.GenOps.Core
stopGradient'TensorFlow.GenOps.Core
stridedSliceTensorFlow.GenOps.Core
stridedSlice'TensorFlow.GenOps.Core
stridedSliceAssignTensorFlow.GenOps.Core
stridedSliceAssign'TensorFlow.GenOps.Core
stridedSliceGradTensorFlow.GenOps.Core
stridedSliceGrad'TensorFlow.GenOps.Core
stringJoinTensorFlow.GenOps.Core
stringJoin'TensorFlow.GenOps.Core
stringSplitTensorFlow.GenOps.Core
stringSplit'TensorFlow.GenOps.Core
stringToHashBucketTensorFlow.GenOps.Core
stringToHashBucket'TensorFlow.GenOps.Core
stringToHashBucketFastTensorFlow.GenOps.Core
stringToHashBucketFast'TensorFlow.GenOps.Core
stringToHashBucketStrongTensorFlow.GenOps.Core
stringToHashBucketStrong'TensorFlow.GenOps.Core
stringToNumberTensorFlow.GenOps.Core
stringToNumber'TensorFlow.GenOps.Core
subTensorFlow.GenOps.Core
sub'TensorFlow.GenOps.Core
substrTensorFlow.GenOps.Core
substr'TensorFlow.GenOps.Core
sumTensorFlow.GenOps.Core
sum'TensorFlow.GenOps.Core
svdTensorFlow.GenOps.Core
svd'TensorFlow.GenOps.Core
switchTensorFlow.GenOps.Core
switch'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html
index efc0367..0150437 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-T.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - T)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Index - T

takeManySparseFromTensorsMapTensorFlow.GenOps.Core
takeManySparseFromTensorsMap'TensorFlow.GenOps.Core
tanTensorFlow.GenOps.Core
tan'TensorFlow.GenOps.Core
tanhTensorFlow.GenOps.Core
tanh'TensorFlow.GenOps.Core
tanhGradTensorFlow.GenOps.Core
tanhGrad'TensorFlow.GenOps.Core
temporaryVariableTensorFlow.GenOps.Core
temporaryVariable'TensorFlow.GenOps.Core
tensorArrayTensorFlow.GenOps.Core
tensorArray'TensorFlow.GenOps.Core
tensorArrayCloseTensorFlow.GenOps.Core
tensorArrayClose'TensorFlow.GenOps.Core
tensorArrayCloseV2TensorFlow.GenOps.Core
tensorArrayCloseV2'TensorFlow.GenOps.Core
tensorArrayCloseV3TensorFlow.GenOps.Core
tensorArrayCloseV3'TensorFlow.GenOps.Core
tensorArrayConcatTensorFlow.GenOps.Core
tensorArrayConcat'TensorFlow.GenOps.Core
tensorArrayConcatV2TensorFlow.GenOps.Core
tensorArrayConcatV2'TensorFlow.GenOps.Core
tensorArrayConcatV3TensorFlow.GenOps.Core
tensorArrayConcatV3'TensorFlow.GenOps.Core
tensorArrayGatherTensorFlow.GenOps.Core
tensorArrayGather'TensorFlow.GenOps.Core
tensorArrayGatherV2TensorFlow.GenOps.Core
tensorArrayGatherV2'TensorFlow.GenOps.Core
tensorArrayGatherV3TensorFlow.GenOps.Core
tensorArrayGatherV3'TensorFlow.GenOps.Core
tensorArrayGradTensorFlow.GenOps.Core
tensorArrayGrad'TensorFlow.GenOps.Core
tensorArrayGradV2TensorFlow.GenOps.Core
tensorArrayGradV2'TensorFlow.GenOps.Core
tensorArrayGradV3TensorFlow.GenOps.Core
tensorArrayGradV3'TensorFlow.GenOps.Core
tensorArrayPackTensorFlow.GenOps.Core
tensorArrayPack'TensorFlow.GenOps.Core
tensorArrayReadTensorFlow.GenOps.Core
tensorArrayRead'TensorFlow.GenOps.Core
tensorArrayReadV2TensorFlow.GenOps.Core
tensorArrayReadV2'TensorFlow.GenOps.Core
tensorArrayReadV3TensorFlow.GenOps.Core
tensorArrayReadV3'TensorFlow.GenOps.Core
tensorArrayScatterTensorFlow.GenOps.Core
tensorArrayScatter'TensorFlow.GenOps.Core
tensorArrayScatterV2TensorFlow.GenOps.Core
tensorArrayScatterV2'TensorFlow.GenOps.Core
tensorArrayScatterV3TensorFlow.GenOps.Core
tensorArrayScatterV3'TensorFlow.GenOps.Core
tensorArraySizeTensorFlow.GenOps.Core
tensorArraySize'TensorFlow.GenOps.Core
tensorArraySizeV2TensorFlow.GenOps.Core
tensorArraySizeV2'TensorFlow.GenOps.Core
tensorArraySizeV3TensorFlow.GenOps.Core
tensorArraySizeV3'TensorFlow.GenOps.Core
tensorArraySplitTensorFlow.GenOps.Core
tensorArraySplit'TensorFlow.GenOps.Core
tensorArraySplitV2TensorFlow.GenOps.Core
tensorArraySplitV2'TensorFlow.GenOps.Core
tensorArraySplitV3TensorFlow.GenOps.Core
tensorArraySplitV3'TensorFlow.GenOps.Core
tensorArrayUnpackTensorFlow.GenOps.Core
tensorArrayUnpack'TensorFlow.GenOps.Core
tensorArrayV2TensorFlow.GenOps.Core
tensorArrayV2'TensorFlow.GenOps.Core
tensorArrayV3TensorFlow.GenOps.Core
tensorArrayV3'TensorFlow.GenOps.Core
tensorArrayWriteTensorFlow.GenOps.Core
tensorArrayWrite'TensorFlow.GenOps.Core
tensorArrayWriteV2TensorFlow.GenOps.Core
tensorArrayWriteV2'TensorFlow.GenOps.Core
tensorArrayWriteV3TensorFlow.GenOps.Core
tensorArrayWriteV3'TensorFlow.GenOps.Core
tensorSummaryTensorFlow.GenOps.Core
tensorSummary'TensorFlow.GenOps.Core
textLineReaderTensorFlow.GenOps.Core
textLineReader'TensorFlow.GenOps.Core
textLineReaderV2TensorFlow.GenOps.Core
textLineReaderV2'TensorFlow.GenOps.Core
tFRecordReaderTensorFlow.GenOps.Core
tFRecordReader'TensorFlow.GenOps.Core
tFRecordReaderV2TensorFlow.GenOps.Core
tFRecordReaderV2'TensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSamplerTensorFlow.GenOps.Core
threadUnsafeUnigramCandidateSampler'TensorFlow.GenOps.Core
tileTensorFlow.GenOps.Core
tile'TensorFlow.GenOps.Core
tileGradTensorFlow.GenOps.Core
tileGrad'TensorFlow.GenOps.Core
topKTensorFlow.GenOps.Core
topK'TensorFlow.GenOps.Core
topKV2TensorFlow.GenOps.Core
topKV2'TensorFlow.GenOps.Core
transposeTensorFlow.GenOps.Core
transpose'TensorFlow.GenOps.Core
truncateDivTensorFlow.GenOps.Core
truncateDiv'TensorFlow.GenOps.Core
truncatedNormalTensorFlow.GenOps.Core
truncatedNormal'TensorFlow.GenOps.Core
truncateModTensorFlow.GenOps.Core
truncateMod'TensorFlow.GenOps.Core
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html
index eb6bf96..169f2b8 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-U.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - U)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html
index 860ad3c..4c456ab 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-V.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - V)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html
index 365636f..a57b2b9 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-W.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - W)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file +

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html
index 564dda7..72fe0ad 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/doc-index-Z.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops. (Index - Z)

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
+

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html
index 42c5482..06c6208 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/index.html
@@ -1,4 +1,4 @@
tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Code generated signatures for the Ops in libtensorflow_c.

Modules

\ No newline at end of file
+

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

tensorflow-core-ops-0.1.0.0: Haskell wrappers for Core Tensorflow Ops.

Code generated signatures for the Ops in libtensorflow.

Modules

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html
index 070cda2..677b05f 100644
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html
+++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/mini_TensorFlow-GenOps-Core.html
@@ -1,4 +1,4 @@
TensorFlow.GenOps.Core

TensorFlow.GenOps.Core

\ No newline at end of file
+

TensorFlow.GenOps.Core

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow-GenOps-Core.html b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow-GenOps-Core.html
deleted file mode 100644
index b241f4e..0000000
--- a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/TensorFlow-GenOps-Core.html
+++ /dev/null
@@ -1,27756 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/TensorFlow/GenOps/Core.hs
-{-# LANGUAGE ConstraintKinds #-}
-{-# LANGUAGE DataKinds #-}
-{-# LANGUAGE FlexibleInstances #-}
-{-# LANGUAGE OverloadedStrings #-}
-{-# LANGUAGE ScopedTypeVariables #-}
-{-# OPTIONS_GHC -fno-warn-name-shadowing #-}
-{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
-module TensorFlow.GenOps.Core where
-
-import Data.ByteString (ByteString)
-import Data.Complex (Complex)
-import Data.Int (Int8, Int16, Int32, Int64)
-import Data.Word (Word8, Word16)
-import Lens.Family2 ((.~), (&))
-import TensorFlow.Build
-import TensorFlow.BuildOp
-import TensorFlow.Output (ResourceHandle)
-import TensorFlow.Tensor
-import TensorFlow.Types
-
--- | Receives the named tensor from send_device on recv_device.
---
--- _HostRecv produces its output on host memory, whereas _Recv produces its
--- output on device memory.
-_HostRecv :: forall tensor_type . (TensorType tensor_type) =>
-             Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
-             -> Build (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive.
-_HostRecv send_device_incarnation | eqLengthGuard [] =
-    buildOp (opDef "_HostRecv"
-             & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type)
-             & opAttr "send_device_incarnation" .~ send_device_incarnation)
-        
-{-
-attr { name: "tensor_type" type: "type" }
-attr {
-  description: "The name of the tensor to receive."
-  name: "tensor_name"
-  type: "string"
-}
-attr {
-  description: "The name of the device sending the tensor."
-  name: "send_device"
-  type: "string"
-}
-attr {
-  description: "The current incarnation of send_device."
-  name: "send_device_incarnation"
-  type: "int"
-}
-attr {
-  description: "The name of the device receiving the tensor."
-  name: "recv_device"
-  type: "string"
-}
-attr {
-  default_value { b: false }
-  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
-  name: "client_terminated"
-  type: "bool"
-}
-output_arg {
-  description: "The tensor to receive."
-  name: "tensor"
-  type_attr: "tensor_type"
-}
--}
-
--- | Sends the named tensor from send_device to recv_device.
---
--- _HostSend requires its input on host memory whereas _Send requires its
--- input on device memory.
-_HostSend :: forall v1 t . (TensorType t) =>
-             Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
-             -> Tensor v1 t -- ^ __tensor__: The tensor to send.
-             -> Build (ControlNode)
-_HostSend send_device_incarnation tensor | eqLengthGuard [] =
-    buildOp (opDef "_HostSend"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "send_device_incarnation" .~ send_device_incarnation)
-        tensor
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "The name of the tensor to send."
-  name: "tensor_name"
-  type: "string"
-}
-attr {
-  description: "The name of the device sending the tensor."
-  name: "send_device"
-  type: "string"
-}
-attr {
-  description: "The current incarnation of send_device."
-  name: "send_device_incarnation"
-  type: "int"
-}
-attr {
-  description: "The name of the device receiving the tensor."
-  name: "recv_device"
-  type: "string"
-}
-attr {
-  default_value { b: false }
-  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
-  name: "client_terminated"
-  type: "bool"
-}
-input_arg {
-  description: "The tensor to send." name: "tensor" type_attr: "T"
-}
--}
-
--- | Receives the named tensor from send_device on recv_device.
-
-_Recv :: forall tensor_type . (TensorType tensor_type) =>
-         Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
-         -> Build (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive.
-_Recv send_device_incarnation | eqLengthGuard [] =
-    buildOp (opDef "_Recv"
-             & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type)
-             & opAttr "send_device_incarnation" .~ send_device_incarnation)
-        
-{-
-attr { name: "tensor_type" type: "type" }
-attr {
-  description: "The name of the tensor to receive."
-  name: "tensor_name"
-  type: "string"
-}
-attr {
-  description: "The name of the device sending the tensor."
-  name: "send_device"
-  type: "string"
-}
-attr {
-  description: "The current incarnation of send_device."
-  name: "send_device_incarnation"
-  type: "int"
-}
-attr {
-  description: "The name of the device receiving the tensor."
-  name: "recv_device"
-  type: "string"
-}
-attr {
-  default_value { b: false }
-  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
-  name: "client_terminated"
-  type: "bool"
-}
-output_arg {
-  description: "The tensor to receive."
-  name: "tensor"
-  type_attr: "tensor_type"
-}
--}
-
--- | Sends the named tensor from send_device to recv_device.
-
-_Send :: forall v1 t . (TensorType t) =>
-         Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device.
-         -> Tensor v1 t -- ^ __tensor__: The tensor to send.
-         -> Build (ControlNode)
-_Send send_device_incarnation tensor | eqLengthGuard [] =
-    buildOp (opDef "_Send"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "send_device_incarnation" .~ send_device_incarnation)
-        tensor
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "The name of the tensor to send."
-  name: "tensor_name"
-  type: "string"
-}
-attr {
-  description: "The name of the device sending the tensor."
-  name: "send_device"
-  type: "string"
-}
-attr {
-  description: "The current incarnation of send_device."
-  name: "send_device_incarnation"
-  type: "int"
-}
-attr {
-  description: "The name of the device receiving the tensor."
-  name: "recv_device"
-  type: "string"
-}
-attr {
-  default_value { b: false }
-  description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller."
-  name: "client_terminated"
-  type: "bool"
-}
-input_arg {
-  description: "The tensor to send." name: "tensor" type_attr: "T"
-}
--}
-
--- | Does nothing. Only useful as a placeholder for control edges.
-
-noOp :: ControlNode
-noOp  | eqLengthGuard [] =
-    buildOp (opDef "NoOp")
-        
-{-
-
--}
-
--- | A graph node which represents a return value of a function.
-
-_Retval :: forall v1 t . (TensorType t) =>
-           Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function.
-           -> Tensor v1 t -- ^ __input__: The return value.
-           -> Build (ControlNode)
-_Retval index input | eqLengthGuard [] =
-    buildOp (opDef "_Retval"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "index" .~ index)
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "This return value is the index-th return value of the function."
-  has_minimum: true
-  name: "index"
-  type: "int"
-}
-input_arg {
-  description: "The return value." name: "input" type_attr: "T"
-}
--}
-
--- | A graph node which represents an argument to a function.
-
-_Arg :: forall t . (TensorType t) =>
-        Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function.
-        -> Build (Tensor Value t) -- ^ __output__: The argument.
-_Arg index | eqLengthGuard [] =
-    buildOp (opDef "_Arg"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "index" .~ index)
-        
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "This argument is the index-th argument of the function."
-  has_minimum: true
-  name: "index"
-  type: "int"
-}
-output_arg {
-  description: "The argument." name: "output" type_attr: "T"
-}
--}
-
--- | Quantized Batch normalization.
---
--- This op is deprecated and will be removed in the future. Prefer
--- `tf.nn.batch_normalization`.
-quantizedBatchNormWithGlobalNormalization :: forall v1 v2 v3 v4 v5 v6 v7 v8 v9
-                                             v10 v11 v12 v13 v14 v15 tinput
-                                             out_type . (TensorType tinput,
-                                                         OneOf '[Data.Int.Int16,
-                                                                 Data.Int.Int32,
-                                                                 Data.Word.Word16,
-                                                                 Data.Word.Word8] tinput,
-                                                         TensorType out_type,
-                                                         OneOf '[Data.Int.Int16,
-                                                                 Data.Int.Int32,
-                                                                 Data.Word.Word16,
-                                                                 Data.Word.Word8] out_type) =>
-                                             Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulting tensor
-                                                  -- needs to be multiplied by gamma.
-                                             -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
-                                             -> Tensor v1 tinput -- ^ __t__: A 4D input Tensor.
-                                             -> Tensor v2 Float -- ^ __t_min__: The value represented by the lowest quantized input.
-                                             -> Tensor v3 Float -- ^ __t_max__: The value represented by the highest quantized input.
-                                             -> Tensor v4 tinput -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
-                                                                 -- This is the first output from tf.nn.moments,
-                                                                 -- or a saved moving average thereof.
-                                             -> Tensor v5 Float -- ^ __m_min__: The value represented by the lowest quantized mean.
-                                             -> Tensor v6 Float -- ^ __m_max__: The value represented by the highest quantized mean.
-                                             -> Tensor v7 tinput -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
-                                                                 -- This is the second output from tf.nn.moments,
-                                                                 -- or a saved moving average thereof.
-                                             -> Tensor v8 Float -- ^ __v_min__: The value represented by the lowest quantized variance.
-                                             -> Tensor v9 Float -- ^ __v_max__: The value represented by the highest quantized variance.
-                                             -> Tensor v10 tinput -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.
-                                                                  -- An offset to be added to the normalized tensor.
-                                             -> Tensor v11 Float -- ^ __beta_min__: The value represented by the lowest quantized offset.
-                                             -> Tensor v12 Float -- ^ __beta_max__: The value represented by the highest quantized offset.
-                                             -> Tensor v13 tinput -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
-                                                                  -- If "scale_after_normalization" is true, this tensor will be multiplied
-                                                                  -- with the normalized tensor.
-                                             -> Tensor v14 Float -- ^ __gamma_min__: The value represented by the lowest quantized gamma.
-                                             -> Tensor v15 Float -- ^ __gamma_max__: The value represented by the highest quantized gamma.
-                                             -> (Tensor Value out_type,
-                                                 Tensor Value Float,
-                                                 Tensor Value Float)
-                                             -- ^ (__result__, __result_min__, __result_max__)
-                                             --
-                                             -- * __result__
-                                             --
-                                             -- * __result_min__
-                                             --
-                                             -- * __result_max__
-quantizedBatchNormWithGlobalNormalization scale_after_normalization
-                                          variance_epsilon t t_min t_max m m_min
-                                          m_max v v_min v_max beta beta_min
-                                          beta_max gamma gamma_min
-                                          gamma_max | eqLengthGuard [] =
-    buildOp (opDef "QuantizedBatchNormWithGlobalNormalization"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type)
-             & opAttr "scale_after_normalization" .~ scale_after_normalization
-             & opAttr "variance_epsilon" .~ variance_epsilon)
-        t t_min t_max m m_min m_max v v_min v_max beta beta_min beta_max gamma
-        gamma_min gamma_max
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "Tinput"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "out_type"
-  type: "type"
-}
-attr {
-  description: "A small float number to avoid dividing by 0."
-  name: "variance_epsilon"
-  type: "float"
-}
-attr {
-  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
-  name: "scale_after_normalization"
-  type: "bool"
-}
-input_arg {
-  description: "A 4D input Tensor." name: "t" type_attr: "Tinput"
-}
-input_arg {
-  description: "The value represented by the lowest quantized input."
-  name: "t_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The value represented by the highest quantized input."
-  name: "t_max"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
-  name: "m"
-  type_attr: "Tinput"
-}
-input_arg {
-  description: "The value represented by the lowest quantized mean."
-  name: "m_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The value represented by the highest quantized mean."
-  name: "m_max"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
-  name: "v"
-  type_attr: "Tinput"
-}
-input_arg {
-  description: "The value represented by the lowest quantized variance."
-  name: "v_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The value represented by the highest quantized variance."
-  name: "v_max"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
-  name: "beta"
-  type_attr: "Tinput"
-}
-input_arg {
-  description: "The value represented by the lowest quantized offset."
-  name: "beta_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The value represented by the highest quantized offset."
-  name: "beta_max"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
-  name: "gamma"
-  type_attr: "Tinput"
-}
-input_arg {
-  description: "The value represented by the lowest quantized gamma."
-  name: "gamma_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The value represented by the highest quantized gamma."
-  name: "gamma_max"
-  type: DT_FLOAT
-}
-output_arg { name: "result" type_attr: "out_type" }
-output_arg { name: "result_min" type: DT_FLOAT }
-output_arg { name: "result_max" type: DT_FLOAT }
--}
-
--- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
-
-quantizedRelu6 :: forall v1 v2 v3 tinput out_type . (TensorType tinput,
-                                                     OneOf '[Data.Int.Int16,
-                                                             Data.Int.Int32,
-                                                             Data.Word.Word16,
-                                                             Data.Word.Word8] tinput,
-                                                     TensorType out_type,
-                                                     OneOf '[Data.Int.Int16,
-                                                             Data.Int.Int32,
-                                                             Data.Word.Word16,
-                                                             Data.Word.Word8] out_type) =>
-                  Tensor v1 tinput -- ^ __features__
-                  -> Tensor v2 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
-                  -> Tensor v3 Float -- ^ __max_features__: The float value that the highest quantized value represents.
-                  -> (Tensor Value out_type, Tensor Value Float,
-                      Tensor Value Float)
-                  -- ^ (__activations__, __min_activations__, __max_activations__)
-                  --
-                  -- * __activations__: Has the same output shape as "features".
-                  --
-                  -- * __min_activations__: The float value that the lowest quantized value represents.
-                  --
-                  -- * __max_activations__: The float value that the highest quantized value represents.
-quantizedRelu6 features min_features max_features | eqLengthGuard [] =
-    buildOp (opDef "QuantizedRelu6"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        features min_features max_features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "Tinput"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  default_value { type: DT_QUINT8 }
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "Tinput" }
-input_arg {
-  description: "The float value that the lowest quantized value represents."
-  name: "min_features"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized value represents."
-  name: "max_features"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Has the same output shape as \"features\"."
-  name: "activations"
-  type_attr: "out_type"
-}
-output_arg {
-  description: "The float value that the lowest quantized value represents."
-  name: "min_activations"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized value represents."
-  name: "max_activations"
-  type: DT_FLOAT
-}
--}
-
--- | Adds Tensor 'bias' to Tensor 'input' for Quantized types.
---
--- Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
-quantizedBiasAdd :: forall v1 v2 v3 v4 v5 v6 t1 t2 out_type . (TensorType t1,
-                                                               OneOf '[Data.Int.Int16,
-                                                                       Data.Int.Int32,
-                                                                       Data.Word.Word16,
-                                                                       Data.Word.Word8] t1,
-                                                               TensorType t2,
-                                                               OneOf '[Data.Int.Int16,
-                                                                       Data.Int.Int32,
-                                                                       Data.Word.Word16,
-                                                                       Data.Word.Word8] t2,
-                                                               TensorType out_type,
-                                                               OneOf '[Data.Int.Int16,
-                                                                       Data.Int.Int32,
-                                                                       Data.Word.Word16,
-                                                                       Data.Word.Word8] out_type) =>
-                    Tensor v1 t1 -- ^ __input__
-                    -> Tensor v2 t2 -- ^ __bias__: A 1D bias Tensor with size matching the last dimension of 'input'.
-                    -> Tensor v3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
-                    -> Tensor v4 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
-                    -> Tensor v5 Float -- ^ __min_bias__: The float value that the lowest quantized bias value represents.
-                    -> Tensor v6 Float -- ^ __max_bias__: The float value that the highest quantized bias value represents.
-                    -> (Tensor Value out_type, Tensor Value Float,
-                        Tensor Value Float)
-                    -- ^ (__output__, __min_out__, __max_out__)
-                    --
-                    -- * __output__
-                    --
-                    -- * __min_out__: The float value that the lowest quantized output value represents.
-                    --
-                    -- * __max_out__: The float value that the highest quantized output value represents.
-quantizedBiasAdd input bias min_input max_input min_bias
-                 max_bias | eqLengthGuard [] =
-    buildOp (opDef "QuantizedBiasAdd"
-             & opAttr "T1" .~ tensorType (undefined :: t1)
-             & opAttr "T2" .~ tensorType (undefined :: t2)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        input bias min_input max_input min_bias max_bias
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T1"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T2"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T1" }
-input_arg {
-  description: "A 1D bias Tensor with size matching the last dimension of \'input\'."
-  name: "bias"
-  type_attr: "T2"
-}
-input_arg {
-  description: "The float value that the lowest quantized input value represents."
-  name: "min_input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized input value represents."
-  name: "max_input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the lowest quantized bias value represents."
-  name: "min_bias"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized bias value represents."
-  name: "max_bias"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type_attr: "out_type" }
-output_arg {
-  description: "The float value that the lowest quantized output value represents."
-  name: "min_out"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized output value represents."
-  name: "max_out"
-  type: DT_FLOAT
-}
--}
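-
--- A worked illustration of the broadcast (all numbers are assumptions for
--- the example, not taken from the op definition): if a quantized 'input' of
--- shape `[2, 3]` dequantizes to the rows `[1, 2, 3]` and `[4, 5, 6]`, and a
--- 'bias' of shape `[3]` dequantizes to `[10, 20, 30]`, the bias is broadcast
--- over dimension 0 and the result dequantizes to `[11, 22, 33]` and
--- `[14, 25, 36]`; min_out and max_out report the float range of the result.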
-
--- | Computes gradient of the FractionalAvgPool function.
---
--- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
--- FractionalAvgPoolGrad; we just need to evenly back-propagate each element of
--- out_backprop to the indices that form the same pooling cell. Therefore, we
--- only need to know the shape of the original input tensor, instead of the
--- whole tensor.
-fractionalAvgPoolGrad :: forall v1 v2 v3 v4 t . (TensorType t,
-                                                 OneOf '[Data.Int.Int32,
-                                                         Data.Int.Int64, Double,
-                                                         Float] t) =>
-                         Tensor v1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__: Original input tensor shape for `fractional_avg_pool`
-                         -> Tensor v2 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
-                                        -- w.r.t. the output of `fractional_avg_pool`.
-                         -> Tensor v3 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
-                                                     -- col_pooling_sequence.
-                         -> Tensor v4 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
-                                                     -- row_pooling sequence.
-                         -> Tensor Value t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
-fractionalAvgPoolGrad orig_input_tensor_shape out_backprop row_pooling_sequence
-                      col_pooling_sequence | eqLengthGuard [] =
-    buildOp (opDef "FractionalAvgPoolGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        orig_input_tensor_shape out_backprop row_pooling_sequence
-        col_pooling_sequence
-{-
-attr {
-  default_value { b: false }
-  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
-  name: "overlapping"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Original input tensor shape for `fractional_avg_pool`"
-  name: "orig_input_tensor_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_avg_pool`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-input_arg {
-  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
-  name: "row_pooling_sequence"
-  type: DT_INT64
-}
-input_arg {
-  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
-  name: "col_pooling_sequence"
-  type: DT_INT64
-}
-output_arg {
-  description: "4-D.  Gradients w.r.t. the input of `fractional_avg_pool`."
-  name: "output"
-  type_attr: "T"
-}
--}
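-
--- A worked illustration (numbers are assumptions for the example): if one
--- pooling cell averaged the three input elements at indices {0, 1, 2} of a
--- row, an out_backprop value of 6 for that cell contributes 6 / 3 = 2 to the
--- gradient at each of those indices.  Only the cell boundaries matter here,
--- which is why the op takes orig_input_tensor_shape instead of the input
--- values themselves.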
-
--- | Computes gradient of the FractionalMaxPool function.
-
-fractionalMaxPoolGrad :: forall v1 v2 v3 v4 v5 t . (TensorType t,
-                                                    OneOf '[Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Double, Float] t) =>
-                         Tensor v1 t -- ^ __orig_input__: Original input for `fractional_max_pool`
-                         -> Tensor v2 t -- ^ __orig_output__: Original output for `fractional_max_pool`
-                         -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`.  Gradients
-                                        -- w.r.t. the output of `fractional_max_pool`.
-                         -> Tensor v4 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with
-                                                     -- col_pooling_sequence.
-                         -> Tensor v5 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with
-                                                     -- row_pooling sequence.
-                         -> Tensor Value t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
-fractionalMaxPoolGrad orig_input orig_output out_backprop row_pooling_sequence
-                      col_pooling_sequence | eqLengthGuard [] =
-    buildOp (opDef "FractionalMaxPoolGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        orig_input orig_output out_backprop row_pooling_sequence
-        col_pooling_sequence
-{-
-attr {
-  default_value { b: false }
-  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
-  name: "overlapping"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Original input for `fractional_max_pool`"
-  name: "orig_input"
-  type_attr: "T"
-}
-input_arg {
-  description: "Original output for `fractional_max_pool`"
-  name: "orig_output"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`.  Gradients\nw.r.t. the output of `fractional_max_pool`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-input_arg {
-  description: "row pooling sequence, form pooling region with\ncol_pooling_sequence."
-  name: "row_pooling_sequence"
-  type: DT_INT64
-}
-input_arg {
-  description: "column pooling sequence, form pooling region with\nrow_pooling sequence."
-  name: "col_pooling_sequence"
-  type: DT_INT64
-}
-output_arg {
-  description: "4-D.  Gradients w.r.t. the input of `fractional_max_pool`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Performs fractional max pooling on the input.
---
--- Fractional max pooling is slightly different from regular max pooling.  In
--- regular max pooling, you downsize an input set by taking the maximum value of
--- smaller N x N subsections of the set (often 2x2), thereby reducing the set by
--- a factor of N, where N is an integer.  Fractional max pooling, as the word
--- "fractional" suggests, means that the overall reduction ratio N does not
--- have to be an integer.
--- 
--- The sizes of the pooling regions are generated randomly but are fairly uniform.
--- For example, let's look at the height dimension, and the constraints on the
--- list of rows that will be pool boundaries.
--- 
--- First we define the following:
--- 
--- 1.  input_row_length : the number of rows from the input set
--- 2.  output_row_length : the number of rows in the output, which will be smaller than the input
--- 3.  alpha = input_row_length / output_row_length : our reduction ratio
--- 4.  K = floor(alpha)
--- 5.  row_pooling_sequence : this is the result list of pool boundary rows
--- 
--- Then, row_pooling_sequence should satisfy:
--- 
--- 1.  a[0] = 0 : the first value of the sequence is 0
--- 2.  a[end] = input_row_length : the last value of the sequence is the input size
--- 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
--- 4.  length(row_pooling_sequence) = output_row_length+1
--- 
--- For more details on fractional max pooling, see this paper:
--- [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
-fractionalMaxPool :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                          Data.Int.Int64,
-                                                          Double, Float] t) =>
-                     Tensor v1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
-                     -> (Tensor Value t, Tensor Value Data.Int.Int64,
-                         Tensor Value Data.Int.Int64)
-                     -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
-                     --
-                     -- * __output__: output tensor after fractional max pooling.
-                     --
-                     -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
-                     --
-                     -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
-fractionalMaxPool value | eqLengthGuard [] =
-    buildOp (opDef "FractionalMaxPool"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        value
-{-
-attr {
-  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
-  has_minimum: true
-  minimum: 4
-  name: "pooling_ratio"
-  type: "list(float)"
-}
-attr {
-  default_value { b: false }
-  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random."
-  name: "pseudo_random"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling."
-  name: "overlapping"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic."
-  name: "deterministic"
-  type: "bool"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "output tensor after fractional max pooling."
-  name: "output"
-  type_attr: "T"
-}
-output_arg {
-  description: "row pooling sequence, needed to calculate gradient."
-  name: "row_pooling_sequence"
-  type: DT_INT64
-}
-output_arg {
-  description: "column pooling sequence, needed to calculate gradient."
-  name: "col_pooling_sequence"
-  type: DT_INT64
-}
--}
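-
--- A worked instance of the constraints above (numbers are illustrative
--- assumptions): with input_row_length = 5 and output_row_length = 3,
--- alpha = 5/3 ~ 1.67 and K = floor(alpha) = 1, so [0, 2, 3, 5] is a valid
--- row_pooling_sequence: it starts at 0, ends at input_row_length = 5, every
--- interval (2, 1, 2) has size K or K+1, and its length is
--- output_row_length + 1 = 4.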
-
--- | Finds values and indices of the `k` largest elements for the last dimension.
---
--- If the input is a vector (rank-1), finds the `k` largest entries in the vector
--- and outputs their values and indices as vectors.  Thus `values[j]` is the
--- `j`-th largest entry in `input`, and its index is `indices[j]`.
--- 
--- For matrices (resp. higher rank input), computes the top `k` entries in each
--- row (resp. vector along the last dimension).  Thus,
--- 
---     values.shape = indices.shape = input.shape[:-1] + [k]
--- 
--- If two elements are equal, the lower-index element appears first.
--- 
--- If `k` varies dynamically, use `TopKV2` below.
-topK :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                             Data.Int.Int64, Data.Int.Int8,
-                                             Data.Word.Word16, Data.Word.Word8,
-                                             Double, Float] t) =>
-        Data.Int.Int64 -- ^ __k__: Number of top elements to look for along the last dimension (along each
-                       -- row for matrices).
-        -> Tensor v1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
-        -> (Tensor Value t, Tensor Value Data.Int.Int32)
-        -- ^ (__values__, __indices__)
-        --
-        -- * __values__: The `k` largest elements along each last dimensional slice.
-        --
-        -- * __indices__: The indices of `values` within the last dimension of `input`.
-topK k input | eqLengthGuard [] =
-    buildOp (opDef "TopK"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "k" .~ k)
-        input
-{-
-attr {
-  description: "Number of top elements to look for along the last dimension (along each\nrow for matrices)."
-  has_minimum: true
-  name: "k"
-  type: "int"
-}
-attr {
-  default_value { b: true }
-  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
-  name: "sorted"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "1-D or higher with last dimension at least `k`."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "The `k` largest elements along each last dimensional slice."
-  name: "values"
-  type_attr: "T"
-}
-output_arg {
-  description: "The indices of `values` within the last dimension of `input`."
-  name: "indices"
-  type: DT_INT32
-}
--}
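-
--- A minimal usage sketch (not part of the generated bindings; it assumes the
--- session API from the sibling tensorflow package and the constant helper
--- from tensorflow-ops, and the numbers are illustrative):
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF (constant)
--- > import qualified TensorFlow.GenOps.Core as CoreOps
--- >
--- > -- Fetch the 2 largest entries of each row of a 2x3 matrix.
--- > -- Expected: values ~ [5, 3, 9, 4], indices ~ [1, 2, 0, 2].
--- > topTwo :: IO (V.Vector Float, V.Vector Int32)
--- > topTwo = TF.runSession $
--- >     TF.run (CoreOps.topK 2 (TF.constant (TF.Shape [2, 3])
--- >                                         [1, 5, 3, 9, 2, 4 :: Float]))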
-
--- | Says whether the targets are in the top `K` predictions.
---
--- This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
--- prediction for the target class is among the top `k` predictions among
--- all predictions for example `i`. Note that the behavior of `InTopK` differs
--- from the `TopK` op in its handling of ties; if multiple classes have the
--- same prediction value and straddle the top-`k` boundary, all of those
--- classes are considered to be in the top `k`.
--- 
--- More formally, let
--- 
---   \\(predictions_i\\) be the predictions for all classes for example `i`,
---   \\(targets_i\\) be the target class for example `i`,
---   \\(out_i\\) be the output for example `i`,
--- 
--- $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
-inTopK :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                  Data.Int.Int64] t) =>
-          Data.Int.Int64 -- ^ __k__: Number of top elements to look at for computing precision.
-          -> Tensor v1 Float -- ^ __predictions__: A `batch_size` x `classes` tensor.
-          -> Tensor v2 t -- ^ __targets__: A `batch_size` vector of class ids.
-          -> Tensor Value Bool -- ^ __precision__: Computed Precision at `k` as a `bool Tensor`.
-inTopK k predictions targets | eqLengthGuard [] =
-    buildOp (opDef "InTopK"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "k" .~ k)
-        predictions targets
-{-
-attr {
-  description: "Number of top elements to look at for computing precision."
-  name: "k"
-  type: "int"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "A `batch_size` x `classes` tensor."
-  name: "predictions"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A `batch_size` vector of class ids."
-  name: "targets"
-  type_attr: "T"
-}
-output_arg {
-  description: "Computed Precision at `k` as a `bool Tensor`."
-  name: "precision"
-  type: DT_BOOL
-}
--}
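-
--- A worked illustration (numbers are assumptions for the example): with
--- k = 2, predictions = [[0.1, 0.8, 0.05], [0.5, 0.3, 0.2]] and
--- targets = [2, 0], the output is [False, True]: class 2 is not among the
--- top-2 predictions of example 0, while class 0 is the top prediction of
--- example 1.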
-
--- | Computes softmax cross entropy cost and gradients to backpropagate.
---
--- Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
--- a matrix of label probabilities, but rather a single label per row
--- of features.  This label is considered to have probability 1.0 for the
--- given row.
--- 
--- Inputs are the logits, not probabilities.
-sparseSoftmaxCrossEntropyWithLogits :: forall v1 v2 t tlabels . (TensorType t,
-                                                                 OneOf '[Data.Word.Word16,
-                                                                         Double,
-                                                                         Float] t,
-                                                                 TensorType tlabels,
-                                                                 OneOf '[Data.Int.Int32,
-                                                                         Data.Int.Int64] tlabels) =>
-                                       Tensor v1 t -- ^ __features__: batch_size x num_classes matrix
-                                       -> Tensor v2 tlabels -- ^ __labels__: batch_size vector with values in [0, num_classes).
-                                                            -- This is the label for the given minibatch entry.
-                                       -> (Tensor Value t, Tensor Value t)
-                                       -- ^ (__loss__, __backprop__)
-                                       --
-                                       -- * __loss__: Per example loss (batch_size vector).
-                                       --
-                                       -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
-sparseSoftmaxCrossEntropyWithLogits features labels | eqLengthGuard [] =
-    buildOp (opDef "SparseSoftmaxCrossEntropyWithLogits"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tlabels" .~ tensorType (undefined :: tlabels))
-        features labels
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT64 }
-  name: "Tlabels"
-  type: "type"
-}
-input_arg {
-  description: "batch_size x num_classes matrix"
-  name: "features"
-  type_attr: "T"
-}
-input_arg {
-  description: "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry."
-  name: "labels"
-  type_attr: "Tlabels"
-}
-output_arg {
-  description: "Per example loss (batch_size vector)."
-  name: "loss"
-  type_attr: "T"
-}
-output_arg {
-  description: "backpropagated gradients (batch_size x num_classes matrix)."
-  name: "backprop"
-  type_attr: "T"
-}
--}
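-
--- A minimal usage sketch (same assumptions as the sketch after topK: the
--- sibling tensorflow package's session API and tensorflow-ops' constant;
--- the numbers are illustrative):
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF (constant)
--- > import qualified TensorFlow.GenOps.Core as CoreOps
--- >
--- > -- Per-example loss and backprop for 2 examples over 3 classes.
--- > losses :: IO (V.Vector Float, V.Vector Float)
--- > losses = TF.runSession $ do
--- >     let features = TF.constant (TF.Shape [2, 3])
--- >                                [0.2, 1.5, -0.3, 2.0, 0.1, 0.4 :: Float]
--- >         labels   = TF.constant (TF.Shape [2]) [1, 0 :: Int32]
--- >     TF.run (CoreOps.sparseSoftmaxCrossEntropyWithLogits features labels)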
-
--- | Computes softmax cross entropy cost and gradients to backpropagate.
---
--- Inputs are the logits, not probabilities.
-softmaxCrossEntropyWithLogits :: forall v1 v2 t . (TensorType t,
-                                                   OneOf '[Data.Word.Word16,
-                                                           Double, Float] t) =>
-                                 Tensor v1 t -- ^ __features__: batch_size x num_classes matrix
-                                 -> Tensor v2 t -- ^ __labels__: batch_size x num_classes matrix
-                                                -- The caller must ensure that each batch of labels represents a valid
-                                                -- probability distribution.
-                                 -> (Tensor Value t, Tensor Value t)
-                                 -- ^ (__loss__, __backprop__)
-                                 --
-                                 -- * __loss__: Per example loss (batch_size vector).
-                                 --
-                                 -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix).
-softmaxCrossEntropyWithLogits features labels | eqLengthGuard [] =
-    buildOp (opDef "SoftmaxCrossEntropyWithLogits"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        features labels
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "batch_size x num_classes matrix"
-  name: "features"
-  type_attr: "T"
-}
-input_arg {
-  description: "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution."
-  name: "labels"
-  type_attr: "T"
-}
-output_arg {
-  description: "Per example loss (batch_size vector)."
-  name: "loss"
-  type_attr: "T"
-}
-output_arg {
-  description: "backpropagated gradients (batch_size x num_classes matrix)."
-  name: "backprop"
-  type_attr: "T"
-}
--}
-
--- | Computes log softmax activations.
---
--- For each batch `i` and class `j` we have
--- 
---     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
-logSoftmax :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                   Float] t) =>
-              Tensor v1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
-              -> Tensor Value t -- ^ __logsoftmax__: Same shape as `logits`.
-logSoftmax logits | eqLengthGuard [] =
-    buildOp (opDef "LogSoftmax"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        logits
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D with shape `[batch_size, num_classes]`."
-  name: "logits"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same shape as `logits`."
-  name: "logsoftmax"
-  type_attr: "T"
-}
--}
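-
--- A worked row (illustrative numbers): for logits = [1, 2, 3],
--- log(sum(exp(logits))) = log(e + e^2 + e^3) ~ 3.4076, so
--- logsoftmax ~ [-2.4076, -1.4076, -0.4076].  Computing this as a single op
--- (rather than composing log with softmax) lets the sum be shifted by the
--- row maximum, which stays numerically stable for large logits.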
-
--- | Computes softsign gradients for a softsign operation.
-
-softsignGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                        Data.Int.Int32,
-                                                        Data.Int.Int64,
-                                                        Data.Int.Int8,
-                                                        Data.Word.Word16,
-                                                        Data.Word.Word8, Double,
-                                                        Float] t) =>
-                Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softsign operation.
-                -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding softsign operation.
-                -> Tensor Value t -- ^ __backprops__: The gradients: `gradients / (1 + abs(-features)) ** 2`.
-softsignGrad gradients features | eqLengthGuard [] =
-    buildOp (opDef "SoftsignGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        gradients features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The backpropagated gradients to the corresponding softsign operation."
-  name: "gradients"
-  type_attr: "T"
-}
-input_arg {
-  description: "The features passed as input to the corresponding softsign operation."
-  name: "features"
-  type_attr: "T"
-}
-output_arg {
-  description: "The gradients: `gradients / (1 + abs(-features)) ** 2`."
-  name: "backprops"
-  type_attr: "T"
-}
--}
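-
--- A worked check of the formula (illustrative numbers): softsign(x) =
--- x / (1 + abs(x)) has derivative 1 / (1 + abs(x))^2, so with features = 3
--- and an incoming gradient of 8 the backprop is 8 / (1 + 3)^2 = 0.5.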
-
--- | Computes softplus: `log(exp(features) + 1)`.
-
-softplus :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t) =>
-            Tensor v1 t -- ^ __features__
-            -> Tensor Value t -- ^ __activations__
-softplus features | eqLengthGuard [] =
-    buildOp (opDef "Softplus"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "T" }
-output_arg { name: "activations" type_attr: "T" }
--}
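-
--- Worked values (illustrative): softplus(0) = log(2) ~ 0.6931, while
--- softplus(10) ~ 10.00005 and softplus(-10) ~ 0.0000454, so the op acts as
--- a smooth approximation of relu: roughly the identity for large positive
--- inputs and roughly 0 for large negative inputs.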
-
--- | Computes gradients for the exponential linear (Elu) operation.
-
-eluGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                   Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Int.Int8,
-                                                   Data.Word.Word16,
-                                                   Data.Word.Word8, Double,
-                                                   Float] t) =>
-           Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Elu operation.
-           -> Tensor v2 t -- ^ __outputs__: The outputs of the corresponding Elu operation.
-           -> Tensor Value t -- ^ __backprops__: The gradients: `gradients * (outputs + 1)` if outputs < 0,
-           -- `gradients` otherwise.
-eluGrad gradients outputs | eqLengthGuard [] =
-    buildOp (opDef "EluGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        gradients outputs
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The backpropagated gradients to the corresponding Elu operation."
-  name: "gradients"
-  type_attr: "T"
-}
-input_arg {
-  description: "The outputs of the corresponding Elu operation."
-  name: "outputs"
-  type_attr: "T"
-}
-output_arg {
-  description: "The gradients: `gradients * (outputs + 1)` if outputs < 0,\n`gradients` otherwise."
-  name: "backprops"
-  type_attr: "T"
-}
--}
-
--- | Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
---
--- See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
--- ](http://arxiv.org/abs/1511.07289)
-elu :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                            Data.Int.Int64, Data.Int.Int8,
-                                            Data.Word.Word16, Data.Word.Word8,
-                                            Double, Float] t) =>
-       Tensor v1 t -- ^ __features__
-       -> Tensor Value t -- ^ __activations__
-elu features | eqLengthGuard [] =
-    buildOp (opDef "Elu"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "T" }
-output_arg { name: "activations" type_attr: "T" }
--}
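-
--- Worked values (illustrative): elu(2) = 2 and elu(-1) = exp(-1) - 1
--- ~ -0.6321.  The two pieces meet at 0, and the output saturates to -1 for
--- large negative inputs.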
-
--- | Computes rectified linear 6: `min(max(features, 0), 6)`.
-
-relu6 :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                              Data.Int.Int64, Data.Int.Int8,
-                                              Data.Word.Word16, Data.Word.Word8,
-                                              Double, Float] t) =>
-         Tensor v1 t -- ^ __features__
-         -> Tensor Value t -- ^ __activations__
-relu6 features | eqLengthGuard [] =
-    buildOp (opDef "Relu6"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "T" }
-output_arg { name: "activations" type_attr: "T" }
--}
-
--- | Computes rectified linear gradients for a Relu operation.
-
-reluGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                    Data.Int.Int32,
-                                                    Data.Int.Int64,
-                                                    Data.Int.Int8,
-                                                    Data.Word.Word16,
-                                                    Data.Word.Word8, Double,
-                                                    Float] t) =>
-            Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu operation.
-            -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding Relu operation, OR
-                           -- the outputs of that operation (both work equivalently).
-            -> Tensor Value t -- ^ __backprops__: `gradients * (features > 0)`.
-reluGrad gradients features | eqLengthGuard [] =
-    buildOp (opDef "ReluGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        gradients features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The backpropagated gradients to the corresponding Relu operation."
-  name: "gradients"
-  type_attr: "T"
-}
-input_arg {
-  description: "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently)."
-  name: "features"
-  type_attr: "T"
-}
-output_arg {
-  description: "`gradients * (features > 0)`."
-  name: "backprops"
-  type_attr: "T"
-}
--}
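-
--- A sketch of the masking behaviour, with the same assumed imports as the
--- 'elu' example: entries whose features are non-positive get a zero
--- gradient, matching `gradients * (features > 0)`:
---
--- > reluGradDemo :: IO (V.Vector Float)
--- > reluGradDemo =
--- >     let shp = TF.Shape [3]
--- >     in TF.runSession $ TF.run $
--- >        C.reluGrad (Ops.constant shp [1, 1, 1])   -- incoming gradients
--- >                   (Ops.constant shp [-2, 0, 5])  -- features
--- >     -- expected: [0.0, 0.0, 1.0]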
-
--- | Computes the gradient of morphological 2-D dilation with respect to the input.
-
-dilation2DBackpropInput :: forall v1 v2 v3 t . (TensorType t,
-                                                OneOf '[Data.Int.Int16,
-                                                        Data.Int.Int32,
-                                                        Data.Int.Int64,
-                                                        Data.Int.Int8,
-                                                        Data.Word.Word16,
-                                                        Data.Word.Word8, Double,
-                                                        Float] t) =>
-                           Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
-                           -> Tensor v2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
-                           -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
-                           -> Tensor Value t -- ^ __in_backprop__: 4-D with shape `[batch, in_height, in_width, depth]`.
-dilation2DBackpropInput input filter out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Dilation2DBackpropInput"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter out_backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
-  has_minimum: true
-  minimum: 4
-  name: "rates"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, in_height, in_width, depth]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "3-D with shape `[filter_height, filter_width, depth]`."
-  name: "filter"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, out_height, out_width, depth]`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D with shape `[batch, in_height, in_width, depth]`."
-  name: "in_backprop"
-  type_attr: "T"
-}
--}
-
--- | Computes gradients of the max pooling function.
-
-maxPoolGrad :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Word.Word16,
-                                                          Float] t) =>
-               Tensor v1 t -- ^ __orig_input__: The original input tensor.
-               -> Tensor v2 t -- ^ __orig_output__: The original output tensor.
-               -> Tensor v3 t -- ^ __grad__: 4-D.  Gradients w.r.t. the output of `max_pool`.
-               -> Tensor Value t -- ^ __output__: Gradients w.r.t. the input to `max_pool`.
-maxPoolGrad orig_input orig_output grad | eqLengthGuard [] =
-    buildOp (opDef "MaxPoolGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        orig_input orig_output grad
-{-
-attr {
-  description: "The size of the window for each dimension of the input tensor."
-  has_minimum: true
-  minimum: 4
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the\ninput tensor."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
-  name: "data_format"
-  type: "string"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The original input tensor."
-  name: "orig_input"
-  type_attr: "T"
-}
-input_arg {
-  description: "The original output tensor."
-  name: "orig_output"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D.  Gradients w.r.t. the output of `max_pool`."
-  name: "grad"
-  type_attr: "T"
-}
-output_arg {
-  description: "Gradients w.r.t. the input to `max_pool`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Gradients for Local Response Normalization.
-
-lRNGrad :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Word.Word16,
-                                                      Float] t) =>
-           Tensor v1 t -- ^ __input_grads__: 4-D with shape `[batch, height, width, channels]`.
-           -> Tensor v2 t -- ^ __input_image__: 4-D with shape `[batch, height, width, channels]`.
-           -> Tensor v3 t -- ^ __output_image__: 4-D with shape `[batch, height, width, channels]`.
-           -> Tensor Value t -- ^ __output__: The gradients for LRN.
-lRNGrad input_grads input_image output_image | eqLengthGuard [] =
-    buildOp (opDef "LRNGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input_grads input_image output_image
-{-
-attr {
-  default_value { i: 5 }
-  description: "A depth radius."
-  name: "depth_radius"
-  type: "int"
-}
-attr {
-  default_value { f: 1.0 }
-  description: "An offset (usually > 0 to avoid dividing by 0)."
-  name: "bias"
-  type: "float"
-}
-attr {
-  default_value { f: 1.0 }
-  description: "A scale factor, usually positive."
-  name: "alpha"
-  type: "float"
-}
-attr {
-  default_value { f: 0.5 }
-  description: "An exponent."
-  name: "beta"
-  type: "float"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "input_grads"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "input_image"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "output_image"
-  type_attr: "T"
-}
-output_arg {
-  description: "The gradients for LRN." name: "output" type_attr: "T"
-}
--}
-
--- | Computes gradients of the max pooling function.
-
-maxPool3DGrad :: forall v1 v2 v3 t . (TensorType t,
-                                      OneOf '[(Data.Complex.Complex Double),
-                                              (Data.Complex.Complex Float),
-                                              Data.Int.Int16, Data.Int.Int32,
-                                              Data.Int.Int64, Data.Int.Int8,
-                                              Data.Word.Word16, Data.Word.Word8,
-                                              Double, Float] t) =>
-                 Tensor v1 Float -- ^ __orig_input__: The original input tensor.
-                 -> Tensor v2 Float -- ^ __orig_output__: The original output tensor.
-                 -> Tensor v3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
-                 -> Tensor Value t -- ^ __output__
-maxPool3DGrad orig_input orig_output grad | eqLengthGuard [] =
-    buildOp (opDef "MaxPool3DGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        orig_input orig_output grad
-{-
-attr {
-  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The original input tensor."
-  name: "orig_input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The original output tensor."
-  name: "orig_output"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
-  name: "grad"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Computes the gradients of 3-D convolution with respect to the filter.
-
-conv3DBackpropFilterV2 :: forall v1 v2 v3 t . (TensorType t,
-                                               OneOf '[(Data.Complex.Complex Double),
-                                                       (Data.Complex.Complex Float),
-                                                       Data.Int.Int16,
-                                                       Data.Int.Int32,
-                                                       Data.Int.Int64,
-                                                       Data.Int.Int8,
-                                                       Data.Word.Word16,
-                                                       Data.Word.Word8, Double,
-                                                       Float] t) =>
-                          Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
-                          -> Tensor v2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
-                                                      -- where `filter` is a 5-D
-                                                      -- `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
-                                                      -- tensor.
-                          -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-                                         -- out_channels]`.
-                          -> Tensor Value t -- ^ __output__
-conv3DBackpropFilterV2 input filter_sizes out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Conv3DBackpropFilterV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter_sizes out_backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "Shape `[batch, depth, rows, cols, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor."
-  name: "filter_sizes"
-  type: DT_INT32
-}
-input_arg {
-  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Computes the gradients of 3-D convolution with respect to the filter.
-
-conv3DBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
-                                             OneOf '[(Data.Complex.Complex Double),
-                                                     (Data.Complex.Complex Float),
-                                                     Data.Int.Int16,
-                                                     Data.Int.Int32,
-                                                     Data.Int.Int64,
-                                                     Data.Int.Int8,
-                                                     Data.Word.Word16,
-                                                     Data.Word.Word8, Double,
-                                                     Float] t) =>
-                        Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
-                        -> Tensor v2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
-                                       -- `in_channels` must match between `input` and `filter`.
-                        -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-                                       -- out_channels]`.
-                        -> Tensor Value t -- ^ __output__
-conv3DBackpropFilter input filter out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Conv3DBackpropFilter"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter out_backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "Shape `[batch, depth, rows, cols, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
-  name: "filter"
-  type_attr: "T"
-}
-input_arg {
-  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Computes a 3-D convolution given 5-D `input` and `filter` tensors.
---
--- In signal processing, cross-correlation is a measure of similarity of
--- two waveforms as a function of a time-lag applied to one of them. This
--- is also known as a sliding dot product or sliding inner-product.
--- 
--- Our Conv3D implements a form of cross-correlation.
-conv3D :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t) =>
-          Tensor v1 t -- ^ __input__: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
-          -> Tensor v2 t -- ^ __filter__: Shape `[filter_depth, filter_height, filter_width, in_channels,
-                         -- out_channels]`. `in_channels` must match between `input` and `filter`.
-          -> Tensor Value t -- ^ __output__
-conv3D input filter | eqLengthGuard [] =
-    buildOp (opDef "Conv3D"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "Shape `[batch, in_depth, in_height, in_width, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`."
-  name: "filter"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
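-
--- The "sliding dot product" mentioned above, in its simplest 1-D form (a
--- plain-Haskell sketch, VALID-style: no padding, stride 1; Conv3D applies
--- the same idea across three spatial dimensions, kernel unflipped):
---
--- > -- Cross-correlate a signal with a kernel: dot product at each offset.
--- > xcorr1d :: [Float] -> [Float] -> [Float]
--- > xcorr1d xs ks =
--- >     [ sum (zipWith (*) (drop i xs) ks)
--- >     | i <- [0 .. length xs - length ks] ]
--- >
--- > -- xcorr1d [1, 2, 3, 4] [1, 1] == [3.0, 5.0, 7.0]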
-
--- | Computes the gradients of depthwise convolution with respect to the filter.
-
-depthwiseConv2dNativeBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
-                                                            OneOf '[Double,
-                                                                    Float] t) =>
-                                       Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
-                                       -> Tensor v2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
-                                                                   -- where `filter` is a 4-D
-                                                                   -- `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
-                                       -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
-                                                      -- Gradients w.r.t. the output of the convolution.
-                                       -> Tensor Value t -- ^ __output__: 4-D with shape
-                                       -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
-                                       -- the `filter` input of the convolution.
-depthwiseConv2dNativeBackpropFilter input filter_sizes
-                                    out_backprop | eqLengthGuard [] =
-    buildOp (opDef "DepthwiseConv2dNativeBackpropFilter"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter_sizes out_backprop
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor."
-  name: "filter_sizes"
-  type: DT_INT32
-}
-input_arg {
-  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the gradients of convolution with respect to the filter.
-
-conv2DBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
-                                             OneOf '[Data.Word.Word16, Double,
-                                                     Float] t) =>
-                        Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
-                        -> Tensor v2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`,
-                                                    -- where `filter` is a 4-D
-                                                    -- `[filter_height, filter_width, in_channels, out_channels]` tensor.
-                        -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
-                                       -- Gradients w.r.t. the output of the convolution.
-                        -> Tensor Value t -- ^ __output__: 4-D with shape
-                        -- `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
-                        -- the `filter` input of the convolution.
-conv2DBackpropFilter input filter_sizes out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Conv2DBackpropFilter"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter_sizes out_backprop
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
-  name: "data_format"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor."
-  name: "filter_sizes"
-  type: DT_INT32
-}
-input_arg {
-  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the gradients of convolution with respect to the input.
-
-conv2DBackpropInput :: forall v1 v2 v3 t . (TensorType t,
-                                            OneOf '[Data.Word.Word16, Double,
-                                                    Float] t) =>
-                       Tensor v1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`,
-                                                -- where `input` is a 4-D `[batch, height, width, channels]` tensor.
-                       -> Tensor v2 t -- ^ __filter__: 4-D with shape
-                                      -- `[filter_height, filter_width, in_channels, out_channels]`.
-                       -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
-                                      -- Gradients w.r.t. the output of the convolution.
-                       -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
-                       -- w.r.t. the input of the convolution.
-conv2DBackpropInput input_sizes filter out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Conv2DBackpropInput"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input_sizes filter out_backprop
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
-  name: "data_format"
-  type: "string"
-}
-input_arg {
-  description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
-  name: "input_sizes"
-  type: DT_INT32
-}
-input_arg {
-  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
-  name: "filter"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes a 2-D convolution given 4-D `input` and `filter` tensors.
---
--- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
--- and a filter / kernel tensor of shape
--- `[filter_height, filter_width, in_channels, out_channels]`, this op
--- performs the following:
--- 
--- 1. Flattens the filter to a 2-D matrix with shape
---    `[filter_height * filter_width * in_channels, output_channels]`.
--- 2. Extracts image patches from the input tensor to form a *virtual*
---    tensor of shape `[batch, out_height, out_width,
---    filter_height * filter_width * in_channels]`.
--- 3. For each patch, right-multiplies the filter matrix and the image patch
---    vector.
--- 
--- In detail, with the default NHWC format,
--- 
---     output[b, i, j, k] =
---         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
---                         filter[di, dj, q, k]
--- 
--- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
--- horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
-conv2D :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                  Float] t) =>
-          Tensor v1 t -- ^ __input__
-          -> Tensor v2 t -- ^ __filter__
-          -> Tensor Value t -- ^ __output__
-conv2D input filter | eqLengthGuard [] =
-    buildOp (opDef "Conv2D"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  default_value { b: true } name: "use_cudnn_on_gpu" type: "bool"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
-  name: "data_format"
-  type: "string"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg { name: "filter" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
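-
--- The spatial output size follows the standard TensorFlow rule for the two
--- padding modes (a plain-Haskell sketch; inDim is the input size, k the
--- filter size, s the stride along one dimension):
---
--- > outDimValid, outDimSame :: Int -> Int -> Int -> Int
--- > outDimValid inDim k s = (inDim - k) `div` s + 1  -- ceil((inDim - k + 1) / s)
--- > outDimSame  inDim _ s = (inDim + s - 1) `div` s  -- ceil(inDim / s)
---
--- For example, a 28x28 input convolved with a 5x5 filter at stride 1 gives
--- 24x24 under VALID padding and stays 28x28 under SAME padding.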
-
--- | Adds `bias` to `value`.
---
--- This is a special case of `tf.add` where `bias` is restricted to be 1-D.
--- Broadcasting is supported, so `value` may have any number of dimensions.
-biasAdd :: forall v1 v2 t . (TensorType t,
-                             OneOf '[(Data.Complex.Complex Double),
-                                     (Data.Complex.Complex Float),
-                                     Data.Int.Int16, Data.Int.Int32,
-                                     Data.Int.Int64, Data.Int.Int8,
-                                     Data.Word.Word16, Data.Word.Word8, Double,
-                                     Float] t) =>
-           Tensor v1 t -- ^ __value__: Any number of dimensions.
-           -> Tensor v2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
-           -> Tensor Value t -- ^ __output__: Broadcasted sum of `value` and `bias`.
-biasAdd value bias | eqLengthGuard [] =
-    buildOp (opDef "BiasAdd"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        value bias
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
-  name: "data_format"
-  type: "string"
-}
-input_arg {
-  description: "Any number of dimensions."
-  name: "value"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D with size the last dimension of `value`."
-  name: "bias"
-  type_attr: "T"
-}
-output_arg {
-  description: "Broadcasted sum of `value` and `bias`."
-  name: "output"
-  type_attr: "T"
-}
--}
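-
--- A sketch with the same assumed imports as the 'elu' example: the 1-D bias
--- is broadcast along the last dimension of 'value':
---
--- > biasAddDemo :: IO (V.Vector Float)
--- > biasAddDemo = TF.runSession $ TF.run $
--- >     C.biasAdd (Ops.constant (TF.Shape [2, 2]) [1, 2, 3, 4])
--- >               (Ops.constant (TF.Shape [2]) [10, 20])
--- >     -- expected (row-major): [11.0, 22.0, 13.0, 24.0]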
-
--- | Batch normalization.
---
--- Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
--- The size of 1D Tensors matches the dimension C of the 4D Tensors.
-fusedBatchNorm :: forall v1 v2 v3 v4 v5 t . (TensorType t,
-                                             OneOf '[(Data.Complex.Complex Double),
-                                                     (Data.Complex.Complex Float),
-                                                     Data.Int.Int16,
-                                                     Data.Int.Int32,
-                                                     Data.Int.Int64,
-                                                     Data.Int.Int8,
-                                                     Data.Word.Word16,
-                                                     Data.Word.Word8, Double,
-                                                     Float] t) =>
-                  Tensor v1 t -- ^ __x__: A 4D Tensor for input data.
-                  -> Tensor v2 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.
-                  -> Tensor v3 t -- ^ __offset__: A 1D Tensor for offset, to shift to the normalized x.
-                  -> Tensor v4 t -- ^ __mean__: A 1D Tensor for population mean. Used for inference only;
-                                 -- must be empty for training.
-                  -> Tensor v5 t -- ^ __variance__: A 1D Tensor for population variance. Used for inference only;
-                                 -- must be empty for training.
-                  -> (Tensor Value t, Tensor Value t, Tensor Value t,
-                      Tensor Value t, Tensor Value t)
-                  -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__)
-                  --
-                  -- * __y__: A 4D Tensor for output data.
-                  --
-                  -- * __batch_mean__: A 1D Tensor for the computed batch mean, to be used by TensorFlow
-                  -- to compute the running mean.
-                  --
-                  -- * __batch_variance__: A 1D Tensor for the computed batch variance, to be used by
-                  -- TensorFlow to compute the running variance.
-                  --
-                  -- * __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused
-                  -- in the gradient computation.
-                  --
-                  -- * __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance
-                  -- in the cuDNN case), to be used in the gradient computation.
-fusedBatchNorm x scale offset mean variance | eqLengthGuard [] =
-    buildOp (opDef "FusedBatchNorm"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x scale offset mean variance
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  description: "The data type for the elements of input and output Tensors."
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { f: 1.0e-4 }
-  description: "A small float number added to the variance of x."
-  name: "epsilon"
-  type: "float"
-}
-attr {
-  default_value { s: "NHWC" }
-  description: "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\"."
-  name: "data_format"
-  type: "string"
-}
-attr {
-  default_value { b: true }
-  description: "A bool value to indicate the operation is for training (default)\nor inference."
-  name: "is_training"
-  type: "bool"
-}
-input_arg {
-  description: "A 4D Tensor for input data." name: "x" type_attr: "T"
-}
-input_arg {
-  description: "A 1D Tensor for scaling factor, to scale the normalized x."
-  name: "scale"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D Tensor for offset, to shift to the normalized x."
-  name: "offset"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training."
-  name: "mean"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training."
-  name: "variance"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 4D Tensor for output data."
-  name: "y"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean."
-  name: "batch_mean"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance."
-  name: "batch_variance"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation."
-  name: "reserve_space_1"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation."
-  name: "reserve_space_2"
-  type_attr: "T"
-}
--}
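-
--- Per element this op fuses the usual batch-normalization formula (a
--- plain-Haskell sketch of the arithmetic; in training mode 'mean' and
--- 'variance' are the batch statistics, in inference mode the supplied
--- population statistics):
---
--- > -- y = scale * (x - mean) / sqrt(variance + epsilon) + offset
--- > fusedBatchNorm1 :: Float -> Float -> Float -> Float -> Float -> Float -> Float
--- > fusedBatchNorm1 epsilon scale offset mean variance x =
--- >     scale * (x - mean) / sqrt (variance + epsilon) + offset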
-
--- | Gradients for batch normalization.
---
--- This op is deprecated. See `tf.nn.batch_normalization`.
-batchNormWithGlobalNormalizationGrad :: forall v1 v2 v3 v4 v5 t . (TensorType t,
-                                                                   OneOf '[(Data.Complex.Complex Double),
-                                                                           (Data.Complex.Complex Float),
-                                                                           Data.Int.Int16,
-                                                                           Data.Int.Int32,
-                                                                           Data.Int.Int64,
-                                                                           Data.Int.Int8,
-                                                                           Data.Word.Word16,
-                                                                           Data.Word.Word8,
-                                                                           Double,
-                                                                           Float] t) =>
-                                        Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulting tensor
-                                             -- needs to be multiplied by gamma.
-                                        -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
-                                        -> Tensor v1 t -- ^ __t__: A 4D input Tensor.
-                                        -> Tensor v2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
-                                                       -- This is the first output from tf.nn.moments,
-                                                       -- or a saved moving average thereof.
-                                        -> Tensor v3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
-                                                       -- This is the second output from tf.nn.moments,
-                                                       -- or a saved moving average thereof.
-                                        -> Tensor v4 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
-                                                       -- If "scale_after_normalization" is true, this Tensor will be multiplied
-                                                       -- by the normalized Tensor.
-                                        -> Tensor v5 t -- ^ __backprop__: 4D backprop Tensor.
-                                        -> (Tensor Value t, Tensor Value t,
-                                            Tensor Value t, Tensor Value t,
-                                            Tensor Value t)
-                                        -- ^ (__dx__, __dm__, __dv__, __db__, __dg__)
-                                        --
-                                        -- * __dx__: 4D backprop tensor for input.
-                                        --
-                                        -- * __dm__: 1D backprop tensor for mean.
-                                        --
-                                        -- * __dv__: 1D backprop tensor for variance.
-                                        --
-                                        -- * __db__: 1D backprop tensor for beta.
-                                        --
-                                        -- * __dg__: 1D backprop tensor for gamma.
-batchNormWithGlobalNormalizationGrad scale_after_normalization variance_epsilon
-                                     t m v gamma backprop | eqLengthGuard [] =
-    buildOp (opDef "BatchNormWithGlobalNormalizationGrad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "scale_after_normalization" .~ scale_after_normalization
-             & opAttr "variance_epsilon" .~ variance_epsilon)
-        t m v gamma backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "A small float number to avoid dividing by 0."
-  name: "variance_epsilon"
-  type: "float"
-}
-attr {
-  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
-  name: "scale_after_normalization"
-  type: "bool"
-}
-input_arg {
-  description: "A 4D input Tensor." name: "t" type_attr: "T"
-}
-input_arg {
-  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
-  name: "m"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
-  name: "v"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor."
-  name: "gamma"
-  type_attr: "T"
-}
-input_arg {
-  description: "4D backprop Tensor." name: "backprop" type_attr: "T"
-}
-output_arg {
-  description: "4D backprop tensor for input."
-  name: "dx"
-  type_attr: "T"
-}
-output_arg {
-  description: "1D backprop tensor for mean."
-  name: "dm"
-  type_attr: "T"
-}
-output_arg {
-  description: "1D backprop tensor for variance."
-  name: "dv"
-  type_attr: "T"
-}
-output_arg {
-  description: "1D backprop tensor for beta."
-  name: "db"
-  type_attr: "T"
-}
-output_arg {
-  description: "1D backprop tensor for gamma."
-  name: "dg"
-  type_attr: "T"
-}
--}
-
--- | 
-
-batchFFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
-              -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
-batchFFT3D input | eqLengthGuard [] =
-    buildOp (opDef "BatchFFT3D")
-        input
-{-
-input_arg { name: "input" type: DT_COMPLEX64 }
-output_arg { name: "output" type: DT_COMPLEX64 }
--}
-
--- | 
-
-batchIFFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
-               -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
-batchIFFT2D input | eqLengthGuard [] =
-    buildOp (opDef "BatchIFFT2D")
-        input
-{-
-input_arg { name: "input" type: DT_COMPLEX64 }
-output_arg { name: "output" type: DT_COMPLEX64 }
--}
-
--- | Performs average pooling on the input.
---
--- Each entry in `output` is the mean of the corresponding size `ksize`
--- window in `value`.
-avgPool :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                Float] t) =>
-           Tensor v1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
-           -> Tensor Value t -- ^ __output__: The average pooled output tensor.
-avgPool value | eqLengthGuard [] =
-    buildOp (opDef "AvgPool"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        value
-{-
-attr {
-  description: "The size of the sliding window for each dimension of `value`."
-  has_minimum: true
-  minimum: 4
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of `value`."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
-  name: "data_format"
-  type: "string"
-}
-attr {
-  allowed_values {
-    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "The average pooled output tensor."
-  name: "output"
-  type_attr: "T"
-}
--}
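-
--- Each output entry is simply the mean over one pooling window (sketch):
---
--- > windowMean :: [Float] -> Float
--- > windowMean ws = sum ws / fromIntegral (length ws)
---
--- The output spatial size obeys the same VALID/SAME rule sketched for
--- 'conv2D' above, with 'ksize' in the role of the filter size.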
-
--- | 
-
-batchFFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
-              -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
-batchFFT2D input | eqLengthGuard [] =
-    buildOp (opDef "BatchFFT2D")
-        input
-{-
-input_arg { name: "input" type: DT_COMPLEX64 }
-output_arg { name: "output" type: DT_COMPLEX64 }
--}
-
--- | 
-
-batchFFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
-            -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
-batchFFT input | eqLengthGuard [] =
-    buildOp (opDef "BatchFFT")
-        input
-{-
-input_arg { name: "input" type: DT_COMPLEX64 }
-output_arg { name: "output" type: DT_COMPLEX64 }
--}
-
--- | Given a quantized tensor described by (input, input_min, input_max),
--- outputs a range that covers the actual values present in that tensor.
---
--- This op is typically used to produce the requested_output_min and
--- requested_output_max for Requantize.
-requantizationRange :: forall v1 v2 v3 tinput . (TensorType tinput,
-                                                 OneOf '[Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8] tinput) =>
-                       Tensor v1 tinput -- ^ __input__
-                       -> Tensor v2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
-                       -> Tensor v3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
-                       -> (Tensor Value Float, Tensor Value Float)
-                       -- ^ (__output_min__, __output_max__)
-                       --
-                       -- * __output_min__: The computed min output.
-                       --
-                       -- * __output_max__: The computed max output.
-requantizationRange input input_min input_max | eqLengthGuard [] =
-    buildOp (opDef "RequantizationRange"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput))
-        input input_min input_max
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  description: "The type of the input."
-  name: "Tinput"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "Tinput" }
-input_arg {
-  description: "The float value that the minimum quantized input value represents."
-  name: "input_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the maximum quantized input value represents."
-  name: "input_max"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The computed min output."
-  name: "output_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "the computed max output."
-  name: "output_max"
-  type: DT_FLOAT
-}
--}
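-
--- In float terms the op reports the range the data actually uses. A
--- plain-Haskell sketch, assuming the linear quantization scheme the
--- surrounding docs describe ('dequant' and 'requantRange' are hypothetical
--- helpers):
---
--- > -- Float interpretation of a raw value q stored in 'bits' bits, when the
--- > -- quantized range represents [minF, maxF].
--- > dequant :: Float -> Float -> Int -> Int -> Float
--- > dequant minF maxF bits q =
--- >     minF + (maxF - minF) * fromIntegral q / (2 ^ bits - 1)
--- >
--- > -- The range RequantizationRange would report for the raw values qs.
--- > requantRange :: Float -> Float -> Int -> [Int] -> (Float, Float)
--- > requantRange minF maxF bits qs = (minimum vals, maximum vals)
--- >   where vals = map (dequant minF maxF bits) qs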
-
--- | Converts the quantized 'input' tensor into a lower-precision 'output',
--- using the output range specified with 'requested_output_min' and
--- 'requested_output_max'.
--- 
--- [input_min, input_max] are scalar floats that specify the range for the float
--- interpretation of the 'input' data. For example, if input_min is -1.0f and
--- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
--- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
-requantize :: forall v1 v2 v3 v4 v5 tinput out_type . (TensorType tinput,
-                                                       OneOf '[Data.Int.Int16,
-                                                               Data.Int.Int32,
-                                                               Data.Word.Word16,
-                                                               Data.Word.Word8] tinput,
-                                                       TensorType out_type,
-                                                       OneOf '[Data.Int.Int16,
-                                                               Data.Int.Int32,
-                                                               Data.Word.Word16,
-                                                               Data.Word.Word8] out_type) =>
-              Tensor v1 tinput -- ^ __input__
-              -> Tensor v2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
-              -> Tensor v3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
-              -> Tensor v4 Float -- ^ __requested_output_min__: The float value that the minimum quantized output value represents.
-              -> Tensor v5 Float -- ^ __requested_output_max__: The float value that the maximum quantized output value represents.
-              -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)
-              -- ^ (__output__, __output_min__, __output_max__)
-              --
-              -- * __output__
-              --
-              -- * __output_min__: The requested_output_min value is copied into this output.
-              --
-              -- * __output_max__: The requested_output_max value is copied into this output.
-requantize input input_min input_max requested_output_min
-           requested_output_max | eqLengthGuard [] =
-    buildOp (opDef "Requantize"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        input input_min input_max requested_output_min requested_output_max
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  description: "The type of the input."
-  name: "Tinput"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  description: "The type of the output. Should be a lower bit depth than Tinput."
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "Tinput" }
-input_arg {
-  description: "The float value that the minimum quantized input value represents."
-  name: "input_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the maximum quantized input value represents."
-  name: "input_max"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the minimum quantized output value represents."
-  name: "requested_output_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the maximum quantized output value represents."
-  name: "requested_output_max"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type_attr: "out_type" }
-output_arg {
-  description: "The requested_output_min value is copied into this output."
-  name: "output_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The requested_output_max value is copied into this output."
-  name: "output_max"
-  type: DT_FLOAT
-}
--}
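-
--- The quint16 example in this description checks out against the
--- hypothetical 'dequant' helper sketched under 'requantizationRange':
---
--- > -- quint16 data interpreted over [-1.0, 1.0]:
--- > lowEnd, highEnd :: Float
--- > lowEnd  = dequant (-1) 1 16 0      -- == -1.0
--- > highEnd = dequant (-1) 1 16 65535  -- ==  1.0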
-
--- | Converts the quantized 'input' tensor into a lower-precision 'output',
--- using the actual distribution of the values to maximize the usage of the
--- lower bit depth and adjusting the output min and max ranges accordingly.
--- 
--- [input_min, input_max] are scalar floats that specify the range for the float
--- interpretation of the 'input' data. For example, if input_min is -1.0f and
--- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
--- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
--- 
--- This operator tries to squeeze as much precision as possible into an output with
--- a lower bit depth by calculating the actual min and max values found in the
--- data. For example, maybe that quint16 input has no values lower than 16,384 and
--- none higher than 49,152. That means only half the range is actually needed, all
--- the float interpretations are between -0.5f and 0.5f, so if we want to compress
--- the data into a quint8 output, we can use that range rather than the theoretical
--- -1.0f to 1.0f that is suggested by the input min and max.
--- 
--- In practice, this is most useful for taking output from operations like
--- QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
--- may have large potential output ranges, but in practice have a distribution of
--- input values that only uses a small fraction of the possible range. By feeding
--- that output into this operator, we can reduce it from 32 bits down to 8 with
--- minimal loss of accuracy.
-quantizeDownAndShrinkRange :: forall v1 v2 v3 tinput
-                              out_type . (TensorType tinput,
-                                          OneOf '[Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8] tinput,
-                                          TensorType out_type,
-                                          OneOf '[Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8] out_type) =>
-                              Tensor v1 tinput -- ^ __input__
-                              -> Tensor v2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents.
-                              -> Tensor v3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents.
-                              -> (Tensor Value out_type, Tensor Value Float,
-                                  Tensor Value Float)
-                              -- ^ (__output__, __output_min__, __output_max__)
-                              --
-                              -- * __output__
-                              --
-                              -- * __output_min__: The float value that the minimum quantized output value represents.
-                              --
-                              -- * __output_max__: The float value that the maximum quantized output value represents.
-quantizeDownAndShrinkRange input input_min input_max | eqLengthGuard [] =
-    buildOp (opDef "QuantizeDownAndShrinkRange"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        input input_min input_max
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  description: "The type of the input."
-  name: "Tinput"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  description: "The type of the output. Should be a lower bit depth than Tinput."
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "Tinput" }
-input_arg {
-  description: "The float value that the minimum quantized input value represents."
-  name: "input_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the maximum quantized input value represents."
-  name: "input_max"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type_attr: "out_type" }
-output_arg {
-  description: "The float value that the minimum quantized output value represents."
-  name: "output_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the maximum quantized output value represents."
-  name: "output_max"
-  type: DT_FLOAT
-}
--}
-
--- | Perform a quantized matrix multiplication of `a` by the matrix `b`.
---
--- The inputs must be two-dimensional matrices and the inner dimension of
--- `a` (after being transposed if `transpose_a` is true) must match the
--- outer dimension of `b` (after being transposed if `transpose_b` is
--- true).
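---
--- A minimal composition sketch (editorial; the concrete types are chosen
--- for illustration): multiply two quint8 matrices, producing a qint32
--- result, then requantize it with 'quantizeDownAndShrinkRange':
---
--- > matMulThenShrink :: Tensor v1 Data.Word.Word8 -> Tensor v2 Data.Word.Word8
--- >                  -> Tensor v3 Float -> Tensor v4 Float
--- >                  -> Tensor v5 Float -> Tensor v6 Float
--- >                  -> (Tensor Value Data.Word.Word8, Tensor Value Float,
--- >                      Tensor Value Float)
--- > matMulThenShrink a b minA maxA minB maxB =
--- >     let (out, minOut, maxOut) =
--- >             quantizedMatMul a b minA maxA minB maxB
--- >                 :: (Tensor Value Data.Int.Int32, Tensor Value Float,
--- >                     Tensor Value Float)
--- >     in quantizeDownAndShrinkRange out minOut maxOut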
-quantizedMatMul :: forall v1 v2 v3 v4 v5 v6 t1 t2 toutput . (TensorType t1,
-                                                             OneOf '[Data.Int.Int16,
-                                                                     Data.Int.Int32,
-                                                                     Data.Word.Word16,
-                                                                     Data.Word.Word8] t1,
-                                                             TensorType t2,
-                                                             OneOf '[Data.Int.Int16,
-                                                                     Data.Int.Int32,
-                                                                     Data.Word.Word16,
-                                                                     Data.Word.Word8] t2,
-                                                             TensorType toutput,
-                                                             OneOf '[Data.Int.Int16,
-                                                                     Data.Int.Int32,
-                                                                     Data.Word.Word16,
-                                                                     Data.Word.Word8] toutput) =>
-                   Tensor v1 t1 -- ^ __a__: Must be a two-dimensional tensor.
-                   -> Tensor v2 t2 -- ^ __b__: Must be a two-dimensional tensor.
-                   -> Tensor v3 Float -- ^ __min_a__: The float value that the lowest quantized `a` value represents.
-                   -> Tensor v4 Float -- ^ __max_a__: The float value that the highest quantized `a` value represents.
-                   -> Tensor v5 Float -- ^ __min_b__: The float value that the lowest quantized `b` value represents.
-                   -> Tensor v6 Float -- ^ __max_b__: The float value that the highest quantized `b` value represents.
-                   -> (Tensor Value toutput, Tensor Value Float,
-                       Tensor Value Float)
-                   -- ^ (__out__, __min_out__, __max_out__)
-                   --
-                   -- * __out__
-                   --
-                   -- * __min_out__: The float value that the lowest quantized output value represents.
-                   --
-                   -- * __max_out__: The float value that the highest quantized output value represents.
-quantizedMatMul a b min_a max_a min_b max_b | eqLengthGuard [] =
-    buildOp (opDef "QuantizedMatMul"
-             & opAttr "T1" .~ tensorType (undefined :: t1)
-             & opAttr "T2" .~ tensorType (undefined :: t2)
-             & opAttr "Toutput" .~ tensorType (undefined :: toutput))
-        a b min_a max_a min_b max_b
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T1"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T2"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  default_value { type: DT_QINT32 }
-  name: "Toutput"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, `a` is transposed before multiplication."
-  name: "transpose_a"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If true, `b` is transposed before multiplication."
-  name: "transpose_b"
-  type: "bool"
-}
-input_arg {
-  description: "Must be a two-dimensional tensor."
-  name: "a"
-  type_attr: "T1"
-}
-input_arg {
-  description: "Must be a two-dimensional tensor."
-  name: "b"
-  type_attr: "T2"
-}
-input_arg {
-  description: "The float value that the lowest quantized `a` value represents."
-  name: "min_a"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized `a` value represents."
-  name: "max_a"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the lowest quantized `b` value represents."
-  name: "min_b"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized `b` value represents."
-  name: "max_b"
-  type: DT_FLOAT
-}
-output_arg { name: "out" type_attr: "Toutput" }
-output_arg {
-  description: "The float value that the lowest quantized output value represents."
-  name: "min_out"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized output value represents."
-  name: "max_out"
-  type: DT_FLOAT
-}
--}
-
--- | Compute the cumulative product of the tensor `x` along `axis`.
---
--- By default, this op performs an inclusive cumprod, which means that the first
--- element of the input is identical to the first element of the output:
--- ```prettyprint
--- tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
--- ```
--- 
--- By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
--- performed instead:
--- ```prettyprint
--- tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
--- ```
--- 
--- By setting the `reverse` kwarg to `True`, the cumprod is performed in the
--- opposite direction:
--- ```prettyprint
--- tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
--- ```
--- This is more efficient than using separate `tf.reverse` ops.
--- 
--- The `reverse` and `exclusive` kwargs can also be combined:
--- ```prettyprint
--- tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
--- ```
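---
--- A concrete sketch from Haskell (editorial; assumes `constant` and `scalar`
--- from TensorFlow.Ops, whose exact signatures may differ across versions):
---
--- > cumprodExample :: Tensor Value Float
--- > cumprodExample = cumprod (constant (Shape [3]) [2, 3, 4 :: Float])
--- >                          (scalar (0 :: Data.Int.Int32))  -- ==> [2, 6, 24]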
-cumprod :: forall v1 v2 t tidx . (TensorType t,
-                                  OneOf '[(Data.Complex.Complex Double),
-                                          (Data.Complex.Complex Float),
-                                          Data.Int.Int16, Data.Int.Int32,
-                                          Data.Int.Int64, Data.Int.Int8,
-                                          Data.Word.Word16, Data.Word.Word8,
-                                          Double, Float] t, TensorType tidx,
-                                  OneOf '[Data.Int.Int32,
-                                          Data.Int.Int64] tidx) =>
-           Tensor v1 t -- ^ __x__
-           -> Tensor v2 tidx -- ^ __axis__
-           -> Tensor Value t -- ^ __out__
-cumprod x axis | eqLengthGuard [] =
-    buildOp (opDef "Cumprod"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        x axis
-{-
-attr { default_value { b: false } name: "exclusive" type: "bool" }
-attr { default_value { b: false } name: "reverse" type: "bool" }
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "axis" type_attr: "Tidx" }
-output_arg { name: "out" type_attr: "T" }
--}
-
--- | Compute the cumulative sum of the tensor `x` along `axis`.
---
--- By default, this op performs an inclusive cumsum, which means that the first
--- element of the input is identical to the first element of the output:
--- ```prettyprint
--- tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
--- ```
--- 
--- By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
--- performed instead:
--- ```prettyprint
--- tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
--- ```
--- 
--- By setting the `reverse` kwarg to `True`, the cumsum is performed in the
--- opposite direction:
--- ```prettyprint
--- tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
--- ```
--- This is more efficient than using separate `tf.reverse` ops.
--- 
--- The `reverse` and `exclusive` kwargs can also be combined:
--- ```prettyprint
--- tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
--- ```
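---
--- Analogously from Haskell (editorial sketch; `constant` and `scalar` are
--- assumed to come from TensorFlow.Ops):
---
--- > cumsumExample :: Tensor Value Float
--- > cumsumExample = cumsum (constant (Shape [3]) [1, 2, 3 :: Float])
--- >                        (scalar (0 :: Data.Int.Int32))  -- ==> [1, 3, 6]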
-cumsum :: forall v1 v2 t tidx . (TensorType t,
-                                 OneOf '[(Data.Complex.Complex Double),
-                                         (Data.Complex.Complex Float),
-                                         Data.Int.Int16, Data.Int.Int32,
-                                         Data.Int.Int64, Data.Int.Int8,
-                                         Data.Word.Word16, Data.Word.Word8,
-                                         Double, Float] t, TensorType tidx,
-                                 OneOf '[Data.Int.Int32,
-                                         Data.Int.Int64] tidx) =>
-          Tensor v1 t -- ^ __x__
-          -> Tensor v2 tidx -- ^ __axis__
-          -> Tensor Value t -- ^ __out__
-cumsum x axis | eqLengthGuard [] =
-    buildOp (opDef "Cumsum"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        x axis
-{-
-attr { default_value { b: false } name: "exclusive" type: "bool" }
-attr { default_value { b: false } name: "reverse" type: "bool" }
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "axis" type_attr: "Tidx" }
-output_arg { name: "out" type_attr: "T" }
--}
-
--- | Compute the pairwise cross product.
---
--- `a` and `b` must be the same shape; they can either be simple 3-element vectors,
--- or any shape where the innermost dimension is 3. In the latter case, each pair
--- of corresponding 3-element vectors is cross-multiplied independently.
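---
--- For instance, crossing the x and y unit vectors yields the z unit vector
--- (editorial sketch; `constant` is assumed to come from TensorFlow.Ops):
---
--- > crossExample :: Tensor Value Float
--- > crossExample = cross (constant (Shape [3]) [1, 0, 0 :: Float])
--- >                      (constant (Shape [3]) [0, 1, 0 :: Float])
--- > -- ==> [0, 0, 1]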
-cross :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t) =>
-         Tensor v1 t -- ^ __a__: A tensor containing 3-element vectors.
-         -> Tensor v2 t -- ^ __b__: Another tensor, of same type and shape as `a`.
-         -> Tensor Value t -- ^ __product__: Pairwise cross product of the vectors in `a` and `b`.
-cross a b | eqLengthGuard [] =
-    buildOp (opDef "Cross"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a b
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "A tensor containing 3-element vectors."
-  name: "a"
-  type_attr: "T"
-}
-input_arg {
-  description: "Another tensor, of same type and shape as `a`."
-  name: "b"
-  type_attr: "T"
-}
-output_arg {
-  description: "Pairwise cross product of the vectors in `a` and `b`."
-  name: "product"
-  type_attr: "T"
-}
--}
-
--- | Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most
---
--- 3 dimensions of `input`.
-iFFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
-          -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
-          --   dimensions of `input` are replaced with their inverse 3D Fourier Transform.
-          -- 
-          -- @compatibility(numpy)
-          -- Equivalent to np.fft.ifftn over the inner-most 3 dimensions
-          -- @end_compatibility
-iFFT3D input | eqLengthGuard [] =
-    buildOp (opDef "IFFT3D")
-        input
-{-
-input_arg {
-  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
-}
-output_arg {
-  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n  dimensions of `input` are replaced with their inverse 3D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft3\n@end_compatibility"
-  name: "output"
-  type: DT_COMPLEX64
-}
--}
-
--- | Compute the 3-dimensional discrete Fourier Transform over the inner-most 3
---
--- dimensions of `input`.
-fFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
-         -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3
-         --   dimensions of `input` are replaced with their 3D Fourier Transform.
-         -- 
-         -- @compatibility(numpy)
-         -- Equivalent to np.fft.fftn over the inner-most 3 dimensions
-         -- @end_compatibility
-fFT3D input | eqLengthGuard [] =
-    buildOp (opDef "FFT3D")
-        input
-{-
-input_arg {
-  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
-}
-output_arg {
-  description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n  dimensions of `input` are replaced with their 3D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft3\n@end_compatibility"
-  name: "output"
-  type: DT_COMPLEX64
-}
--}
-
--- | Computes gradients of the maxpooling function.
-
-maxPoolGradWithArgmax :: forall v1 v2 v3 targmax t . (TensorType targmax,
-                                                      OneOf '[Data.Int.Int32,
-                                                              Data.Int.Int64] targmax,
-                                                      TensorType t,
-                                                      OneOf '[Data.Word.Word16,
-                                                              Float] t) =>
-                         Tensor v1 t -- ^ __input__: The original input.
-                         -> Tensor v2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
-                                        -- output of `max_pool`.
-                         -> Tensor v3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`.
-                         -> Tensor Value t -- ^ __output__: Gradients w.r.t. the input of `max_pool`.
-maxPoolGradWithArgmax input grad argmax | eqLengthGuard [] =
-    buildOp (opDef "MaxPoolGradWithArgmax"
-             & opAttr "Targmax" .~ tensorType (undefined :: targmax)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input grad argmax
-{-
-attr {
-  description: "The size of the window for each dimension of the input tensor."
-  has_minimum: true
-  minimum: 4
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the\ninput tensor."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Targmax"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The original input." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the\noutput of `max_pool`."
-  name: "grad"
-  type_attr: "T"
-}
-input_arg {
-  description: "The indices of the maximum values chosen for each output of `max_pool`."
-  name: "argmax"
-  type_attr: "Targmax"
-}
-output_arg {
-  description: "Gradients w.r.t. the input of `max_pool`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Compute the 2-dimensional discrete Fourier Transform over the inner-most
---
--- 2 dimensions of `input`.
-fFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
-         -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
-         --   dimensions of `input` are replaced with their 2D Fourier Transform.
-         -- 
-         -- @compatibility(numpy)
-         -- Equivalent to np.fft.fft2
-         -- @end_compatibility
-fFT2D input | eqLengthGuard [] =
-    buildOp (opDef "FFT2D")
-        input
-{-
-input_arg {
-  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
-}
-output_arg {
-  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n  dimensions of `input` are replaced with their 2D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft2\n@end_compatibility"
-  name: "output"
-  type: DT_COMPLEX64
-}
--}
-
--- | Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most
---
--- dimension of `input`.
-iFFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
-        -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
-        -- dimension of `input` is replaced with its inverse 1D Fourier Transform.
-iFFT input | eqLengthGuard [] =
-    buildOp (opDef "IFFT")
-        input
-{-
-input_arg {
-  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
-}
-output_arg {
-  description: "A complex64 tensor of the same shape as `input`. The inner-most\ndimension of `input` is replaced with its inverse 1D Fourier Transform."
-  name: "output"
-  type: DT_COMPLEX64
-}
--}
-
--- | Compute the 1-dimensional discrete Fourier Transform over the inner-most
---
--- dimension of `input`.
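---
--- For example, the transform of a unit impulse is the all-ones spectrum
--- (editorial sketch; assumes `constant` from TensorFlow.Ops, a TensorType
--- instance for Complex Float, and `(:+)` from Data.Complex):
---
--- > impulseSpectrum :: Tensor Value (Data.Complex.Complex Float)
--- > impulseSpectrum =
--- >     fFT (constant (Shape [4]) [1 :+ 0, 0 :+ 0, 0 :+ 0, 0 :+ 0])
--- > -- ==> [1 :+ 0, 1 :+ 0, 1 :+ 0, 1 :+ 0]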
-fFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
-       -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most
-       -- dimension of `input` is replaced with its 1D Fourier Transform.
-fFT input | eqLengthGuard [] =
-    buildOp (opDef "FFT")
-        input
-{-
-input_arg {
-  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
-}
-output_arg {
-  description: "A complex64 tensor of the same shape as `input`. The inner-most\ndimension of `input` is replaced with its 1D Fourier Transform."
-  name: "output"
-  type: DT_COMPLEX64
-}
--}
-
--- | Returns the complex conjugate of a complex number.
---
--- Given a tensor `input` of complex numbers, this operation returns a tensor of
--- complex numbers that are the complex conjugate of each element in `input`. The
--- complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
--- real part and *b* is the imaginary part.
--- 
--- The complex conjugate returned by this operation is of the form \\(a - bj\\).
--- 
--- For example:
--- 
--- ```
--- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
--- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
--- ```
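---
--- The same example written against this binding (editorial sketch; assumes
--- `constant` from TensorFlow.Ops and `(:+)` from Data.Complex):
---
--- > conjExample :: Tensor Value (Data.Complex.Complex Float)
--- > conjExample =
--- >     conj (constant (Shape [2]) [(-2.25) :+ 4.75, 3.25 :+ 5.75])
--- > -- ==> [(-2.25) :+ (-4.75), 3.25 :+ (-5.75)]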
-conj :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float)] t) =>
-        Tensor v1 t -- ^ __input__
-        -> Tensor Value t -- ^ __output__
-conj input | eqLengthGuard [] =
-    buildOp (opDef "Conj"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
-  default_value { type: DT_COMPLEX64 }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Returns the real part of a complex number.
---
--- Given a tensor `input` of complex numbers, this operation returns a tensor of
--- type `float` that is the real part of each element in `input`. All elements in
--- `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
--- part returned by this operation and *b* is the imaginary part.
--- 
--- For example:
--- 
--- ```
--- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
--- tf.real(input) ==> [-2.25, 3.25]
--- ```
-real :: forall v1 t tout . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float)] t,
-                            TensorType tout, OneOf '[Double, Float] tout) =>
-        Tensor v1 t -- ^ __input__
-        -> Tensor Value tout -- ^ __output__
-real input | eqLengthGuard [] =
-    buildOp (opDef "Real"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tout" .~ tensorType (undefined :: tout))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
-  default_value { type: DT_COMPLEX64 }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  default_value { type: DT_FLOAT }
-  name: "Tout"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "Tout" }
--}
-
--- | Converts two real numbers to a complex number.
---
--- Given a tensor `real` representing the real part of a complex number, and a
--- tensor `imag` representing the imaginary part of a complex number, this
--- operation returns complex numbers elementwise of the form \\(a + bj\\), where
--- *a* represents the `real` part and *b* represents the `imag` part.
--- 
--- The input tensors `real` and `imag` must have the same shape.
--- 
--- For example:
--- 
--- ```
--- # tensor 'real' is [2.25, 3.25]
--- # tensor 'imag' is [4.75, 5.75]
--- tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
--- ```
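---
--- The corresponding Haskell call (editorial sketch; assumes `constant` from
--- TensorFlow.Ops):
---
--- > complexExample :: Tensor Value (Data.Complex.Complex Float)
--- > complexExample = complex (constant (Shape [2]) [2.25, 3.25 :: Float])
--- >                          (constant (Shape [2]) [4.75, 5.75 :: Float])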
-complex :: forall v1 v2 t tout . (TensorType t, OneOf '[Double, Float] t,
-                                  TensorType tout,
-                                  OneOf '[(Data.Complex.Complex Double),
-                                          (Data.Complex.Complex Float)] tout) =>
-           Tensor v1 t -- ^ __real__
-           -> Tensor v2 t -- ^ __imag__
-           -> Tensor Value tout -- ^ __out__
-complex real imag | eqLengthGuard [] =
-    buildOp (opDef "Complex"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tout" .~ tensorType (undefined :: tout))
-        real imag
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
-  default_value { type: DT_COMPLEX64 }
-  name: "Tout"
-  type: "type"
-}
-input_arg { name: "real" type_attr: "T" }
-input_arg { name: "imag" type_attr: "T" }
-output_arg { name: "out" type_attr: "Tout" }
--}
-
--- | Creates a sequence of numbers.
---
--- This operation creates a sequence of numbers that begins at `start` and
--- extends by increments of `delta` up to but not including `limit`.
--- 
--- For example:
--- 
--- ```
--- # 'start' is 3
--- # 'limit' is 18
--- # 'delta' is 3
--- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
--- ```
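---
--- The same sequence via this binding (editorial sketch; assumes `scalar`
--- from TensorFlow.Ops):
---
--- > rangeExample :: Tensor Value Data.Int.Int32
--- > rangeExample = range (scalar 3) (scalar 18) (scalar 3)
--- > -- ==> [3, 6, 9, 12, 15]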
-range :: forall v1 v2 v3 tidx . (TensorType tidx, OneOf '[Data.Int.Int32,
-                                                          Data.Int.Int64,
-                                                          Double,
-                                                          Float] tidx) =>
-         Tensor v1 tidx -- ^ __start__: 0-D (scalar). First entry in the sequence.
-         -> Tensor v2 tidx -- ^ __limit__: 0-D (scalar). Upper limit of sequence, exclusive.
-         -> Tensor v3 tidx -- ^ __delta__: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
-         -> Tensor Value tidx -- ^ __output__: 1-D.
-range start limit delta | eqLengthGuard [] =
-    buildOp (opDef "Range"
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        start limit delta
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
-    }
-  }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "0-D (scalar). First entry in the sequence."
-  name: "start"
-  type_attr: "Tidx"
-}
-input_arg {
-  description: "0-D (scalar). Upper limit of sequence, exclusive."
-  name: "limit"
-  type_attr: "Tidx"
-}
-input_arg {
-  description: "0-D (scalar). Optional. Default is 1. Number that increments `start`."
-  name: "delta"
-  type_attr: "Tidx"
-}
-output_arg { description: "1-D." name: "output" type_attr: "Tidx" }
--}
-
--- | Computes the "logical or" of elements across dimensions of a tensor.
---
--- Reduces `input` along the dimensions given in `reduction_indices`. Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
--- retained with length 1.
-any :: forall v1 v2 tidx . (TensorType tidx, OneOf '[Data.Int.Int32,
-                                                     Data.Int.Int64] tidx) =>
-       Tensor v1 Bool -- ^ __input__: The tensor to reduce.
-       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
-       -> Tensor Value Bool -- ^ __output__: The reduced tensor.
-any input reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "Any"
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to reduce." name: "input" type: DT_BOOL
-}
-input_arg {
-  description: "The dimensions to reduce."
-  name: "reduction_indices"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The reduced tensor." name: "output" type: DT_BOOL
-}
--}
-
--- | Computes the mean along sparse segments of a tensor.
---
--- Read [the section on
--- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
--- of segments.
--- 
--- Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
--- dimension, selecting a subset of dimension 0, specified by `indices`.
-sparseSegmentMean :: forall v1 v2 v3 t tidx . (TensorType t, OneOf '[Double,
-                                                                     Float] t,
-                                               TensorType tidx,
-                                               OneOf '[Data.Int.Int32,
-                                                       Data.Int.Int64] tidx) =>
-                     Tensor v1 t -- ^ __data__
-                     -> Tensor v2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
-                     -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
-                     -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-                     -- has size `k`, the number of segments.
-sparseSegmentMean data' indices segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SparseSegmentMean"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        data' indices segment_ids
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor. Has same rank as `segment_ids`."
-  name: "indices"
-  type_attr: "Tidx"
-}
-input_arg {
-  description: "A 1-D tensor. Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type: DT_INT32
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the sum along sparse segments of a tensor.
---
--- Read [the section on
--- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
--- of segments.
--- 
--- Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
--- dimension, selecting a subset of dimension 0, specified by `indices`.
--- 
--- For example:
--- 
--- ```prettyprint
--- c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
--- 
--- # Select two rows, one segment.
--- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
---   ==> [[0 0 0 0]]
--- 
--- # Select two rows, two segments.
--- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
---   ==> [[ 1  2  3  4]
---        [-1 -2 -3 -4]]
--- 
--- # Select all rows, two segments.
--- tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
---   ==> [[0 0 0 0]
---        [5 6 7 8]]
--- 
--- # Which is equivalent to:
--- tf.segment_sum(c, tf.constant([0, 0, 1]))
--- ```
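---
--- The first case above, written against this binding (editorial sketch;
--- assumes `constant` from TensorFlow.Ops):
---
--- > c :: Tensor Value Float
--- > c = constant (Shape [3, 4]) [1, 2, 3, 4, -1, -2, -3, -4, 5, 6, 7, 8]
--- >
--- > twoRowsOneSegment :: Tensor Value Float
--- > twoRowsOneSegment =
--- >     sparseSegmentSum c (constant (Shape [2]) [0, 1 :: Data.Int.Int32])
--- >                        (constant (Shape [2]) [0, 0 :: Data.Int.Int32])
--- > -- ==> [[0, 0, 0, 0]]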
-sparseSegmentSum :: forall v1 v2 v3 t tidx . (TensorType t,
-                                              OneOf '[Data.Int.Int16,
-                                                      Data.Int.Int32,
-                                                      Data.Int.Int64,
-                                                      Data.Int.Int8,
-                                                      Data.Word.Word16,
-                                                      Data.Word.Word8, Double,
-                                                      Float] t, TensorType tidx,
-                                              OneOf '[Data.Int.Int32,
-                                                      Data.Int.Int64] tidx) =>
-                    Tensor v1 t -- ^ __data__
-                    -> Tensor v2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
-                    -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
-                    -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-                    -- has size `k`, the number of segments.
-sparseSegmentSum data' indices segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SparseSegmentSum"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        data' indices segment_ids
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor. Has same rank as `segment_ids`."
-  name: "indices"
-  type_attr: "Tidx"
-}
-input_arg {
-  description: "A 1-D tensor. Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type: DT_INT32
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the sum along segments of a tensor.
---
--- Read [the section on
--- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
--- of segments.
--- 
--- Computes a tensor such that
--- `output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
--- that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
--- need not be sorted and need not cover all values in the full
--- range of valid values.
--- 
--- If the sum is empty for a given segment ID `i`, `output[i] = 0`.
--- 
--- `num_segments` should equal the number of distinct segment IDs.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/UnsortedSegmentSum.png" alt>
--- </div>
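---
--- A small sketch with out-of-order segment ids (editorial; assumes
--- `constant` and `scalar` from TensorFlow.Ops):
---
--- > unsortedExample :: Tensor Value Float
--- > unsortedExample = unsortedSegmentSum
--- >     (constant (Shape [4]) [1, 2, 3, 4 :: Float])
--- >     (constant (Shape [4]) [1, 0, 1, 0 :: Data.Int.Int32])
--- >     (scalar 2)  -- num_segments
--- > -- ==> [6, 4]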
-unsortedSegmentSum :: forall v1 v2 v3 t tindices . (TensorType t,
-                                                    OneOf '[(Data.Complex.Complex Double),
-                                                            (Data.Complex.Complex Float),
-                                                            Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Data.Int.Int8,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8,
-                                                            Double, Float] t,
-                                                    TensorType tindices,
-                                                    OneOf '[Data.Int.Int32,
-                                                            Data.Int.Int64] tindices) =>
-                      Tensor v1 t -- ^ __data__
-                      -> Tensor v2 tindices -- ^ __segment_ids__: A tensor whose shape is a prefix of `data.shape`.
-                      -> Tensor v3 Data.Int.Int32 -- ^ __num_segments__
-                      -> Tensor Value t -- ^ __output__: Has same shape as data, except for the first `segment_ids.rank`
-                      -- dimensions, which are replaced with a single dimension which has size
-                      -- `num_segments`.
-unsortedSegmentSum data' segment_ids num_segments | eqLengthGuard [] =
-    buildOp (opDef "UnsortedSegmentSum"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        data' segment_ids num_segments
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A tensor whose shape is a prefix of `data.shape`."
-  name: "segment_ids"
-  type_attr: "Tindices"
-}
-input_arg { name: "num_segments" type: DT_INT32 }
-output_arg {
-  description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the minimum along segments of a tensor.
---
--- Read [the section on
--- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
--- of segments.
--- 
--- Computes a tensor such that
--- \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
--- that `segment_ids[j] == i`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/SegmentMin.png" alt>
--- </div>
-segmentMin :: forall v1 v2 t tindices . (TensorType t, OneOf '[Data.Int.Int16,
-                                                               Data.Int.Int32,
-                                                               Data.Int.Int64,
-                                                               Data.Int.Int8,
-                                                               Data.Word.Word16,
-                                                               Data.Word.Word8,
-                                                               Double, Float] t,
-                                         TensorType tindices,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tindices) =>
-              Tensor v1 t -- ^ __data__
-              -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
-                                    -- first dimension.  Values should be sorted and can be repeated.
-              -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-              -- has size `k`, the number of segments.
-segmentMin data' segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SegmentMin"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        data' segment_ids
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the product along segments of a tensor.
---
--- Read [the section on
--- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
--- of segments.
--- 
--- Computes a tensor such that
--- \\(output_i = \prod_j data_j\\) where the product is over `j` such
--- that `segment_ids[j] == i`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/SegmentProd.png" alt>
--- </div>
-segmentProd :: forall v1 v2 t tindices . (TensorType t,
-                                          OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t, TensorType tindices,
-                                          OneOf '[Data.Int.Int32,
-                                                  Data.Int.Int64] tindices) =>
-               Tensor v1 t -- ^ __data__
-               -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
-                                     -- first dimension.  Values should be sorted and can be repeated.
-               -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-               -- has size `k`, the number of segments.
-segmentProd data' segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SegmentProd"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        data' segment_ids
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the mean along segments of a tensor.
---
--- Read [the section on
--- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
--- of segments.
--- 
--- Computes a tensor such that
--- \\(output_i = \frac{\sum_j data_j}{N}\\) where the mean is
--- over `j` such that `segment_ids[j] == i` and `N` is the total number of
--- values summed.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/SegmentMean.png" alt>
--- </div>
-segmentMean :: forall v1 v2 t tindices . (TensorType t, OneOf '[Data.Int.Int16,
-                                                                Data.Int.Int32,
-                                                                Data.Int.Int64,
-                                                                Data.Int.Int8,
-                                                                Data.Word.Word16,
-                                                                Data.Word.Word8,
-                                                                Double,
-                                                                Float] t,
-                                          TensorType tindices,
-                                          OneOf '[Data.Int.Int32,
-                                                  Data.Int.Int64] tindices) =>
-               Tensor v1 t -- ^ __data__
-               -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
-                                     -- first dimension.  Values should be sorted and can be repeated.
-               -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-               -- has size `k`, the number of segments.
-segmentMean data' segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SegmentMean"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        data' segment_ids
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the sum along segments of a tensor.
---
--- Read [the section on Segmentation](../../api_docs/python/math_ops.md#segmentation)
--- for an explanation of segments.
--- 
--- Computes a tensor such that
--- \\(output_i = \sum_j data_j\\) where the sum is over `j` such
--- that `segment_ids[j] == i`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/SegmentSum.png" alt>
--- </div>
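---
--- A concrete sketch (editorial; assumes `constant` from TensorFlow.Ops):
---
--- > segmentSumExample :: Tensor Value Float
--- > segmentSumExample = segmentSum
--- >     (constant (Shape [4]) [1, 2, 3, 4 :: Float])
--- >     (constant (Shape [4]) [0, 0, 1, 1 :: Data.Int.Int32])
--- > -- ==> [3, 7]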
-segmentSum :: forall v1 v2 t tindices . (TensorType t,
-                                         OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t, TensorType tindices,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tindices) =>
-              Tensor v1 t -- ^ __data__
-              -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
-                                    -- first dimension.  Values should be sorted and can be repeated.
-              -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-              -- has size `k`, the number of segments.
-segmentSum data' segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SegmentSum"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        data' segment_ids
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns the index with the smallest value across dimensions of a tensor.
-
-argMin :: forall v1 v2 t tidx . (TensorType t,
-                                 OneOf '[(Data.Complex.Complex Double),
-                                         (Data.Complex.Complex Float),
-                                         Data.Int.Int16, Data.Int.Int32,
-                                         Data.Int.Int64, Data.Int.Int8,
-                                         Data.Word.Word16, Data.Word.Word8,
-                                         Double, Float] t, TensorType tidx,
-                                 OneOf '[Data.Int.Int32,
-                                         Data.Int.Int64] tidx) =>
-          Tensor v1 t -- ^ __input__
-          -> Tensor v2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input).  Describes which dimension
-                            -- of the input Tensor to reduce across. For vectors, use dimension = 0.
-          -> Tensor Value Data.Int.Int64 -- ^ __output__
-argMin input dimension | eqLengthGuard [] =
-    buildOp (opDef "ArgMin"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input dimension
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg {
-  description: "int32, 0 <= dimension < rank(input).  Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0."
-  name: "dimension"
-  type_attr: "Tidx"
-}
-output_arg { name: "output" type: DT_INT64 }
--}
-
--- | Computes the maximum of elements across dimensions of a tensor.
---
--- Reduces `input` along the dimensions given in `reduction_indices`. Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
--- retained with length 1.
-max :: forall v1 v2 t tidx . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float),
-                                      Data.Int.Int16, Data.Int.Int32,
-                                      Data.Int.Int64, Data.Int.Int8,
-                                      Data.Word.Word16, Data.Word.Word8, Double,
-                                      Float] t, TensorType tidx,
-                              OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
-       Tensor v1 t -- ^ __input__: The tensor to reduce.
-       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
-       -> Tensor Value t -- ^ __output__: The reduced tensor.
-max input reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "Max"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to reduce." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "The dimensions to reduce."
-  name: "reduction_indices"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The reduced tensor." name: "output" type_attr: "T"
-}
--}
-
--- | Computes the minimum of elements across dimensions of a tensor.
---
--- Reduces `input` along the dimensions given in `reduction_indices`. Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
--- retained with length 1.
-min :: forall v1 v2 t tidx . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float),
-                                      Data.Int.Int16, Data.Int.Int32,
-                                      Data.Int.Int64, Data.Int.Int8,
-                                      Data.Word.Word16, Data.Word.Word8, Double,
-                                      Float] t, TensorType tidx,
-                              OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
-       Tensor v1 t -- ^ __input__: The tensor to reduce.
-       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
-       -> Tensor Value t -- ^ __output__: The reduced tensor.
-min input reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "Min"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to reduce." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "The dimensions to reduce."
-  name: "reduction_indices"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The reduced tensor." name: "output" type_attr: "T"
-}
--}
-
--- | Computes the product of elements across dimensions of a tensor.
---
--- Reduces `input` along the dimensions given in `reduction_indices`. Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
--- retained with length 1.
-prod :: forall v1 v2 t tidx . (TensorType t,
-                               OneOf '[(Data.Complex.Complex Double),
-                                       (Data.Complex.Complex Float),
-                                       Data.Int.Int16, Data.Int.Int32,
-                                       Data.Int.Int64, Data.Int.Int8,
-                                       Data.Word.Word16, Data.Word.Word8,
-                                       Double, Float] t, TensorType tidx,
-                               OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
-        Tensor v1 t -- ^ __input__: The tensor to reduce.
-        -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
-        -> Tensor Value t -- ^ __output__: The reduced tensor.
-prod input reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "Prod"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to reduce." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "The dimensions to reduce."
-  name: "reduction_indices"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The reduced tensor." name: "output" type_attr: "T"
-}
--}
-
--- | Computes the sum of elements across dimensions of a tensor.
---
--- Reduces `input` along the dimensions given in `reduction_indices`. Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
--- retained with length 1.
-sum :: forall v1 v2 t tidx . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float),
-                                      Data.Int.Int16, Data.Int.Int32,
-                                      Data.Int.Int64, Data.Int.Int8,
-                                      Data.Word.Word16, Data.Word.Word8, Double,
-                                      Float] t, TensorType tidx,
-                              OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
-       Tensor v1 t -- ^ __input__: The tensor to reduce.
-       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
-       -> Tensor Value t -- ^ __output__: The reduced tensor.
-sum input reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "Sum"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to reduce." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "The dimensions to reduce."
-  name: "reduction_indices"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The reduced tensor." name: "output" type_attr: "T"
-}
--}
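-
--- A usage sketch for the reduction ops above (`min`, `prod`, `sum`). This is
--- not part of the generated bindings; it assumes this package's
--- TensorFlow.Core and TensorFlow.Ops helpers (`constant`, `vector`,
--- `runSession`, `run`):
-{-
-import Data.Int (Int32)
-import Data.Vector (Vector)
-import qualified TensorFlow.Core as TF
-import qualified TensorFlow.GenOps.Core as CoreOps
-import qualified TensorFlow.Ops as TF
-
--- Sum a 2x3 constant along dimension 0; the result has shape [3].
-reduceExample :: IO (Vector Float)
-reduceExample = TF.runSession $ do
-    let x = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
-        dims = TF.vector [0 :: Int32]
-    TF.run (CoreOps.sum x dims)  -- [5.0, 7.0, 9.0]
--}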
-
--- | Multiply matrix "a" by matrix "b".
---
--- The inputs must be two-dimensional matrices and the inner dimension of "a" must
--- match the outer dimension of "b". This op is optimized for the case where at
--- least one of "a" or "b" is sparse. The breakeven for using this versus a dense
--- matrix multiply on one platform was 30% zero values in the sparse matrix.
-sparseMatMul :: forall v1 v2 ta tb . (TensorType ta, OneOf '[Data.Word.Word16,
-                                                             Float] ta,
-                                      TensorType tb, OneOf '[Data.Word.Word16,
-                                                             Float] tb) =>
-                Tensor v1 ta -- ^ __a__
-                -> Tensor v2 tb -- ^ __b__
-                -> Tensor Value Float -- ^ __product__
-sparseMatMul a b | eqLengthGuard [] =
-    buildOp (opDef "SparseMatMul"
-             & opAttr "Ta" .~ tensorType (undefined :: ta)
-             & opAttr "Tb" .~ tensorType (undefined :: tb))
-        a b
-{-
-attr {
-  default_value { b: false } name: "transpose_a" type: "bool"
-}
-attr {
-  default_value { b: false } name: "transpose_b" type: "bool"
-}
-attr {
-  default_value { b: false } name: "a_is_sparse" type: "bool"
-}
-attr {
-  default_value { b: false } name: "b_is_sparse" type: "bool"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
-  default_value { type: DT_FLOAT }
-  name: "Ta"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } }
-  default_value { type: DT_FLOAT }
-  name: "Tb"
-  type: "type"
-}
-input_arg { name: "a" type_attr: "Ta" }
-input_arg { name: "b" type_attr: "Tb" }
-output_arg { name: "product" type: DT_FLOAT }
--}
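-
--- A sketch of typical use (imports as in the reduction sketch above). The
--- calling convention matches `matMul` below, but the operands should be
--- mostly zeros and the product is always Float:
-{-
-sparseExample :: IO (Vector Float)
-sparseExample = TF.runSession $ do
-    let a = TF.constant (TF.Shape [2, 3]) [1, 0, 0, 0, 0, 2 :: Float]
-        b = TF.constant (TF.Shape [3, 2]) [0, 3, 0, 0, 4, 0 :: Float]
-    TF.run (CoreOps.sparseMatMul a b)
--}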
-
--- | Multiply the matrix "a" by the matrix "b".
---
--- The inputs must be two-dimensional matrices and the inner dimension of
--- "a" (after being transposed if transpose_a is true) must match the
--- outer dimension of "b" (after being transposed if transposed_b is
--- true).
--- 
--- *Note*: The default kernel implementation for MatMul on GPUs uses
--- cublas.
-matMul :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int32,
-                                                  Data.Word.Word16, Double,
-                                                  Float] t) =>
-          Tensor v1 t -- ^ __a__
-          -> Tensor v2 t -- ^ __b__
-          -> Tensor Value t -- ^ __product__
-matMul a b | eqLengthGuard [] =
-    buildOp (opDef "MatMul"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a b
-{-
-attr {
-  default_value { b: false }
-  description: "If true, \"a\" is transposed before multiplication."
-  name: "transpose_a"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If true, \"b\" is transposed before multiplication."
-  name: "transpose_b"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "a" type_attr: "T" }
-input_arg { name: "b" type_attr: "T" }
-output_arg { name: "product" type_attr: "T" }
--}
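-
--- A sketch of typical use (imports as in the reduction sketch above): the
--- inner dimensions must agree, so a `[2, 3]` by `[3, 2]` product yields a
--- `[2, 2]` result.
-{-
-matMulExample :: IO (Vector Float)
-matMulExample = TF.runSession $ do
-    let a = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
-        b = TF.constant (TF.Shape [3, 2]) [7, 8, 9, 10, 11, 12 :: Float]
-    TF.run (CoreOps.matMul a b)
--}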
-
--- | Returns the truth value of x AND y element-wise.
---
--- *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-logicalAnd :: Tensor v1 Bool -- ^ __x__
-              -> Tensor v2 Bool -- ^ __y__
-              -> Tensor Value Bool -- ^ __z__
-logicalAnd x y | eqLengthGuard [] =
-    buildOp (opDef "LogicalAnd")
-        x y
-{-
-input_arg { name: "x" type: DT_BOOL }
-input_arg { name: "y" type: DT_BOOL }
-output_arg { name: "z" type: DT_BOOL }
--}
-
--- | Returns the truth value of (x == y) element-wise.
---
--- *NOTE*: `Equal` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-equal :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Bool,
-                                                 Data.ByteString.ByteString,
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t) =>
-         Tensor v1 t -- ^ __x__
-         -> Tensor v2 t -- ^ __y__
-         -> Tensor Value Bool -- ^ __z__
-equal x y | eqLengthGuard [] =
-    buildOp (opDef "Equal"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_QUINT8
-      type: DT_QINT8
-      type: DT_QINT32
-      type: DT_STRING
-      type: DT_BOOL
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type: DT_BOOL }
--}
-
--- | Returns the truth value of (x >= y) element-wise.
---
--- *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-greaterEqual :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                        Data.Int.Int32,
-                                                        Data.Int.Int64,
-                                                        Data.Int.Int8,
-                                                        Data.Word.Word16,
-                                                        Data.Word.Word8, Double,
-                                                        Float] t) =>
-                Tensor v1 t -- ^ __x__
-                -> Tensor v2 t -- ^ __y__
-                -> Tensor Value Bool -- ^ __z__
-greaterEqual x y | eqLengthGuard [] =
-    buildOp (opDef "GreaterEqual"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type: DT_BOOL }
--}
-
--- | Returns the truth value of (x <= y) element-wise.
---
--- *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-lessEqual :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                     Data.Int.Int32,
-                                                     Data.Int.Int64,
-                                                     Data.Int.Int8,
-                                                     Data.Word.Word16,
-                                                     Data.Word.Word8, Double,
-                                                     Float] t) =>
-             Tensor v1 t -- ^ __x__
-             -> Tensor v2 t -- ^ __y__
-             -> Tensor Value Bool -- ^ __z__
-lessEqual x y | eqLengthGuard [] =
-    buildOp (opDef "LessEqual"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type: DT_BOOL }
--}
-
--- | Returns the truth value of (x < y) element-wise.
---
--- *NOTE*: `Less` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-less :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                                Data.Int.Int64, Data.Int.Int8,
-                                                Data.Word.Word16,
-                                                Data.Word.Word8, Double,
-                                                Float] t) =>
-        Tensor v1 t -- ^ __x__
-        -> Tensor v2 t -- ^ __y__
-        -> Tensor Value Bool -- ^ __z__
-less x y | eqLengthGuard [] =
-    buildOp (opDef "Less"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type: DT_BOOL }
--}
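-
--- A broadcasting sketch for the comparison ops above (imports as in the
--- reduction sketch; `scalar` is assumed from TensorFlow.Ops): a scalar
--- threshold broadcasts against a vector, yielding one Bool per element.
-{-
-lessExample :: IO (Vector Bool)
-lessExample = TF.runSession $ do
-    let xs = TF.vector [1, 5, 3 :: Int32]
-        threshold = TF.scalar (4 :: Int32)
-    TF.run (CoreOps.less xs threshold)  -- [True, False, True]
--}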
-
--- | Compute the polygamma function \\(\psi^{(n)}(x)\\).
---
--- The polygamma function is defined as:
--- 
--- ```
--- \psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)
--- ```
--- where \\(\psi(x)\\) is the digamma function.
-polygamma :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
-             Tensor v1 t -- ^ __a__
-             -> Tensor v2 t -- ^ __x__
-             -> Tensor Value t -- ^ __z__
-polygamma a x | eqLengthGuard [] =
-    buildOp (opDef "Polygamma"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a x
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "a" type_attr: "T" }
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Compute the lower regularized incomplete Gamma function `P(a, x)`.
---
--- The lower regularized incomplete Gamma function is defined as:
--- 
--- ```
--- P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
--- ```
--- where
--- ```
--- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt
--- ```
--- is the lower incomplete Gamma function.
--- 
--- Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete
--- Gamma function.
-igamma :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
-          Tensor v1 t -- ^ __a__
-          -> Tensor v2 t -- ^ __x__
-          -> Tensor Value t -- ^ __z__
-igamma a x | eqLengthGuard [] =
-    buildOp (opDef "Igamma"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a x
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "a" type_attr: "T" }
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Compute the upper regularized incomplete Gamma function `Q(a, x)`.
---
--- The upper regularized incomplete Gamma function is defined as:
--- 
--- ```
--- Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)
--- ```
--- where
--- ```
--- Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt
--- ```
--- is the upper incomplete Gamma function.
--- 
--- Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
--- Gamma function.
-igammac :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
-           Tensor v1 t -- ^ __a__
-           -> Tensor v2 t -- ^ __x__
-           -> Tensor Value t -- ^ __z__
-igammac a x | eqLengthGuard [] =
-    buildOp (opDef "Igammac"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a x
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "a" type_attr: "T" }
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
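-
--- Since `P(a, x) + Q(a, x) = 1`, the two ops are complementary; this sketch
--- (imports as in the reduction sketch; pair fetching is assumed to be
--- supported by `run`) fetches both so the element-wise sums can be checked
--- against 1:
-{-
-igammaExample :: IO (Vector Float, Vector Float)
-igammaExample = TF.runSession $ do
-    let a = TF.vector [0.5, 1.0, 2.0 :: Float]
-        x = TF.vector [1.0, 1.0, 1.0 :: Float]
-    TF.run (CoreOps.igamma a x, CoreOps.igammac a x)
--}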
-
--- | Returns element-wise remainder of division.
---
--- *NOTE*: `Mod` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-mod :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64,
-                                               Double, Float] t) =>
-       Tensor v1 t -- ^ __x__
-       -> Tensor v2 t -- ^ __y__
-       -> Tensor Value t -- ^ __z__
-mod x y | eqLengthGuard [] =
-    buildOp (opDef "Mod"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Returns the min of x and y (i.e. x < y ? x : y) element-wise.
---
--- *NOTE*: `Minimum` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-minimum :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Word.Word16, Double,
-                                                   Float] t) =>
-           Tensor v1 t -- ^ __x__
-           -> Tensor v2 t -- ^ __y__
-           -> Tensor Value t -- ^ __z__
-minimum x y | eqLengthGuard [] =
-    buildOp (opDef "Minimum"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Returns the max of x and y (i.e. x > y ? x : y) element-wise.
---
--- *NOTE*: `Maximum` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-maximum :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Word.Word16, Double,
-                                                   Float] t) =>
-           Tensor v1 t -- ^ __x__
-           -> Tensor v2 t -- ^ __y__
-           -> Tensor Value t -- ^ __z__
-maximum x y | eqLengthGuard [] =
-    buildOp (opDef "Maximum"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
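-
--- `maximum` and `minimum` compose into a clamp (imports as in the reduction
--- sketch): take the max against the lower bound, then the min against the
--- upper bound.
-{-
-clampExample :: IO (Vector Float)
-clampExample = TF.runSession $ do
-    let x = TF.vector [-2, 0.5, 7 :: Float]
-        lo = TF.scalar (0 :: Float)
-        hi = TF.scalar (1 :: Float)
-    TF.run (CoreOps.minimum (CoreOps.maximum x lo) hi)  -- [0.0, 0.5, 1.0]
--}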
-
--- | Returns (x - y)(x - y) element-wise.
---
--- *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-squaredDifference :: forall v1 v2 t . (TensorType t,
-                                       OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.Int.Int32, Data.Int.Int64,
-                                               Data.Word.Word16, Double,
-                                               Float] t) =>
-                     Tensor v1 t -- ^ __x__
-                     -> Tensor v2 t -- ^ __y__
-                     -> Tensor Value t -- ^ __z__
-squaredDifference x y | eqLengthGuard [] =
-    buildOp (opDef "SquaredDifference"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Computes softplus gradients for a softplus operation.
-
-softplusGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                        Data.Int.Int32,
-                                                        Data.Int.Int64,
-                                                        Data.Int.Int8,
-                                                        Data.Word.Word16,
-                                                        Data.Word.Word8, Double,
-                                                        Float] t) =>
-                Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softplus operation.
-                -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding softplus operation.
-                -> Tensor Value t -- ^ __backprops__: The gradients: `gradients / (1 + exp(-features))`.
-softplusGrad gradients features | eqLengthGuard [] =
-    buildOp (opDef "SoftplusGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        gradients features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The backpropagated gradients to the corresponding softplus operation."
-  name: "gradients"
-  type_attr: "T"
-}
-input_arg {
-  description: "The features passed as input to the corresponding softplus operation."
-  name: "features"
-  type_attr: "T"
-}
-output_arg {
-  description: "The gradients: `gradients / (1 + exp(-features))`."
-  name: "backprops"
-  type_attr: "T"
-}
--}
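-
--- The backprop formula follows from \\(softplus(x) = \log_e (1 + e^x)\\):
--- its derivative is the logistic sigmoid,
--- \\(\frac{d}{dx} \log_e (1 + e^x) = \frac{e^x}{1 + e^x} = \frac{1}{1 + e^{-x}}\\),
--- so `backprops = gradients / (1 + exp(-features))` is just the chain rule
--- applied element-wise.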
-
--- | BatchToSpace for 4-D tensors of type T.
---
--- This is a legacy version of the more general BatchToSpaceND.
--- 
--- Rearranges (permutes) data from batch into blocks of spatial data, followed by
--- cropping. This is the reverse transformation of SpaceToBatch. More specifically,
--- this op outputs a copy of the input tensor where values from the `batch`
--- dimension are moved in spatial blocks to the `height` and `width` dimensions,
--- followed by cropping along the `height` and `width` dimensions.
-batchToSpace :: forall v1 v2 t tidx . (TensorType t, TensorType tidx,
-                                       OneOf '[Data.Int.Int32,
-                                               Data.Int.Int64] tidx) =>
-                Data.Int.Int64 -- ^ __block_size__
-                -> Tensor v1 t -- ^ __input__: 4-D tensor with shape
-                               -- `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
-                               --   depth]`. Note that the batch size of the input tensor must be divisible by
-                               -- `block_size * block_size`.
-                -> Tensor v2 tidx -- ^ __crops__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
-                                  -- how many elements to crop from the intermediate result across the spatial
-                                  -- dimensions as follows:
-                                  -- 
-                                  --     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
-                -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, height, width, depth]`, where:
-                -- 
-                --       height = height_pad - crop_top - crop_bottom
-                --       width = width_pad - crop_left - crop_right
-                -- 
-                -- The attr `block_size` must be greater than one. It indicates the block size.
-                -- 
-                -- Some examples:
-                -- 
-                -- (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
-                -- 
-                -- ```prettyprint
-                -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-                -- ```
-                -- 
-                -- The output tensor has shape `[1, 2, 2, 1]` and value:
-                -- 
-                -- ```prettyprint
-                -- x = [[[[1], [2]], [[3], [4]]]]
-                -- ```
-                -- 
-                -- (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
-                -- 
-                -- ```prettyprint
-                -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
-                -- ```
-                -- 
-                -- The output tensor has shape `[1, 2, 2, 3]` and value:
-                -- 
-                -- ```prettyprint
-                -- x = [[[[1, 2, 3], [4, 5, 6]],
-                --       [[7, 8, 9], [10, 11, 12]]]]
-                -- ```
-                -- 
-                -- (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
-                -- 
-                -- ```prettyprint
-                -- x = [[[[1], [3]], [[5], [7]]],
-                --      [[[2], [4]], [[10], [12]]],
-                --      [[[5], [7]], [[13], [15]]],
-                --      [[[6], [8]], [[14], [16]]]]
-                -- ```
-                -- 
-                -- The output tensor has shape `[1, 4, 4, 1]` and value:
-                -- 
-                -- ```prettyprint
-                -- x = [[[1],   [2],  [3],  [4]],
-                --      [[5],   [6],  [7],  [8]],
-                --      [[9],  [10], [11],  [12]],
-                --      [[13], [14], [15],  [16]]]
-                -- ```
-                -- 
-                -- (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
-                -- 
-                -- ```prettyprint
-                -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
-                --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
-                -- ```
-                -- 
-                -- The output tensor has shape `[2, 2, 4, 1]` and value:
-                -- 
-                -- ```prettyprint
-                -- x = [[[[1], [3]], [[5], [7]]],
-                --      [[[2], [4]], [[10], [12]]],
-                --      [[[5], [7]], [[13], [15]]],
-                --      [[[6], [8]], [[14], [16]]]]
-                -- ```
-batchToSpace block_size input crops | eqLengthGuard [] =
-    buildOp (opDef "BatchToSpace"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx)
-             & opAttr "block_size" .~ block_size)
-        input crops
-{-
-attr { name: "T" type: "type" }
-attr {
-  has_minimum: true minimum: 2 name: "block_size" type: "int"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n  depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]"
-  name: "crops"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "4-D with shape `[batch, height, width, depth]`, where:\n\n      height = height_pad - crop_top - crop_bottom\n      width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```"
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns x * y element-wise.
---
--- *NOTE*: `Mul` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-mul :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.Int.Int16, Data.Int.Int32,
-                                               Data.Int.Int64, Data.Int.Int8,
-                                               Data.Word.Word16,
-                                               Data.Word.Word8, Double,
-                                               Float] t) =>
-       Tensor v1 t -- ^ __x__
-       -> Tensor v2 t -- ^ __y__
-       -> Tensor Value t -- ^ __z__
-mul x y | eqLengthGuard [] =
-    buildOp (opDef "Mul"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Returns element-wise integer closest to x.
---
--- If the result is midway between two representable values,
--- the even representable value is chosen.
--- For example:
--- 
--- ```
--- rint(-1.5) ==> -2.0
--- rint(0.5000001) ==> 1.0
--- rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
--- ```
-rint :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-        Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-rint x | eqLengthGuard [] =
-    buildOp (opDef "Rint"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Returns element-wise smallest integer not less than x.
-
-ceil :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-ceil x | eqLengthGuard [] =
-    buildOp (opDef "Ceil"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Returns element-wise largest integer not greater than x.
-
-floor :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                              Float] t) =>
-         Tensor v1 t -- ^ __x__
-         -> Tensor Value t -- ^ __y__
-floor x | eqLengthGuard [] =
-    buildOp (opDef "Floor"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Performs 3D max pooling on the input.
-
-maxPool3D :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t) =>
-             Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
-             -> Tensor Value t -- ^ __output__: The max pooled output tensor.
-maxPool3D input | eqLengthGuard [] =
-    buildOp (opDef "MaxPool3D"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "The max pooled output tensor."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns which elements of x are Inf.
---
--- @compatibility(numpy)
--- Equivalent to np.isinf
--- @end_compatibility
-isInf :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                              Float] t) =>
-         Tensor v1 t -- ^ __x__
-         -> Tensor Value Bool -- ^ __y__
-isInf x | eqLengthGuard [] =
-    buildOp (opDef "IsInf"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type: DT_BOOL }
--}
-
--- | Computes the gradients of depthwise convolution with respect to the input.
-
-depthwiseConv2dNativeBackpropInput :: forall v1 v2 v3 t . (TensorType t,
-                                                           OneOf '[Double,
-                                                                   Float] t) =>
-                                      Tensor v1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`,
-                                                               -- where `input` is a 4-D `[batch, height, width, channels]` tensor.
-                                      -> Tensor v2 t -- ^ __filter__: 4-D with shape
-                                                     -- `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
-                                      -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`.
-                                                     -- Gradients w.r.t. the output of the convolution.
-                                      -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
-                                      -- w.r.t. the input of the convolution.
-depthwiseConv2dNativeBackpropInput input_sizes filter
-                                   out_backprop | eqLengthGuard [] =
-    buildOp (opDef "DepthwiseConv2dNativeBackpropInput"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input_sizes filter out_backprop
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\nof the convolution."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
-  name: "input_sizes"
-  type: DT_INT32
-}
-input_arg {
-  description: "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`."
-  name: "filter"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns which elements of x are NaN.
---
--- @compatibility(numpy)
--- Equivalent to np.isnan
--- @end_compatibility
-isNan :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                              Float] t) =>
-         Tensor v1 t -- ^ __x__
-         -> Tensor Value Bool -- ^ __y__
-isNan x | eqLengthGuard [] =
-    buildOp (opDef "IsNan"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type: DT_BOOL }
--}
-
--- | Computes natural logarithm of (1 + x) element-wise.
---
--- I.e., \\(y = \log_e (1 + x)\\).
-log1p :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                              (Data.Complex.Complex Float),
-                                              Data.Word.Word16, Double,
-                                              Float] t) =>
-         Tensor v1 t -- ^ __x__
-         -> Tensor Value t -- ^ __y__
-log1p x | eqLengthGuard [] =
-    buildOp (opDef "Log1p"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes asin of x element-wise.
-
-asin :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Int.Int32, Data.Int.Int64,
-                                             Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-asin x | eqLengthGuard [] =
-    buildOp (opDef "Asin"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Finds values and indices of the `k` largest elements for the last dimension.
---
--- If the input is a vector (rank-1), finds the `k` largest entries in the vector
--- and outputs their values and indices as vectors.  Thus `values[j]` is the
--- `j`-th largest entry in `input`, and its index is `indices[j]`.
--- 
--- For matrices (resp. higher rank input), computes the top `k` entries in each
--- row (resp. vector along the last dimension).  Thus,
--- 
---     values.shape = indices.shape = input.shape[:-1] + [k]
--- 
--- If two elements are equal, the lower-index element appears first.
--- 
--- This is the same as `TopK`, but takes `k` as an input rather than an attr.
-topKV2 :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t) =>
-          Tensor v1 t -- ^ __input__: 1-D or higher with last dimension at least `k`.
-          -> Tensor v2 Data.Int.Int32 -- ^ __k__: 0-D.  Number of top elements to look for along the last dimension (along each
-                                      -- row for matrices).
-          -> (Tensor Value t, Tensor Value Data.Int.Int32)
-          -- ^ (__values__, __indices__)
-          --
-          -- * __values__: The `k` largest elements along each last dimensional slice.
-          --
-          -- * __indices__: The indices of `values` within the last dimension of `input`.
-topKV2 input k | eqLengthGuard [] =
-    buildOp (opDef "TopKV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input k
-{-
-attr {
-  default_value { b: true }
-  description: "If true the resulting `k` elements will be sorted by the values in\ndescending order."
-  name: "sorted"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "1-D or higher with last dimension at least `k`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "0-D.  Number of top elements to look for along the last dimension (along each\nrow for matrices)."
-  name: "k"
-  type: DT_INT32
-}
-output_arg {
-  description: "The `k` largest elements along each last dimensional slice."
-  name: "values"
-  type_attr: "T"
-}
-output_arg {
-  description: "The indices of `values` within the last dimension of `input`."
-  name: "indices"
-  type: DT_INT32
-}
--}
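-
--- A sketch fetching both outputs at once (imports as in the reduction
--- sketch; pair fetching is assumed to be supported by `run`):
-{-
-topKExample :: IO (Vector Float, Vector Int32)
-topKExample = TF.runSession $ do
-    let xs = TF.vector [10, 40, 30, 20 :: Float]
-        k = TF.scalar (2 :: Int32)
-    TF.run (CoreOps.topKV2 xs k)  -- ([40.0, 30.0], [1, 2])
--}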
-
--- | Computes cos of x element-wise.
-
-cos :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-cos x | eqLengthGuard [] =
-    buildOp (opDef "Cos"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes sin of x element-wise.
-
-sin :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-sin x | eqLengthGuard [] =
-    buildOp (opDef "Sin"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Outputs random integers from a uniform distribution.
---
--- The generated values are uniform integers in the range `[minval, maxval)`.
--- The lower bound `minval` is included in the range, while the upper bound
--- `maxval` is excluded.
--- 
--- The random integers are slightly biased unless `maxval - minval` is an exact
--- power of two.  The bias is small for values of `maxval - minval` significantly
--- smaller than the range of the output (either `2^32` or `2^64`).
-randomUniformInt :: forall v1 v2 v3 tout t . (TensorType tout,
-                                              OneOf '[Data.Int.Int32,
-                                                      Data.Int.Int64] tout,
-                                              TensorType t,
-                                              OneOf '[Data.Int.Int32,
-                                                      Data.Int.Int64] t) =>
-                    Tensor v1 t -- ^ __shape__: The shape of the output tensor.
-                    -> Tensor v2 tout -- ^ __minval__: 0-D.  Inclusive lower bound on the generated integers.
-                    -> Tensor v3 tout -- ^ __maxval__: 0-D.  Exclusive upper bound on the generated integers.
-                    -> Build (Tensor Value tout) -- ^ __output__: A tensor of the specified shape filled with uniform random integers.
-randomUniformInt shape minval maxval | eqLengthGuard [] =
-    buildOp (opDef "RandomUniformInt"
-             & opAttr "Tout" .~ tensorType (undefined :: tout)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        shape minval maxval
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tout"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The shape of the output tensor."
-  name: "shape"
-  type_attr: "T"
-}
-input_arg {
-  description: "0-D.  Inclusive lower bound on the generated integers."
-  name: "minval"
-  type_attr: "Tout"
-}
-input_arg {
-  description: "0-D.  Exclusive upper bound on the generated integers."
-  name: "maxval"
-  type_attr: "Tout"
-}
-output_arg {
-  description: "A tensor of the specified shape filled with uniform random integers."
-  name: "output"
-  type_attr: "Tout"
-}
--}
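-
--- The op is stateful, so it returns in `Build`; this sketch assumes a
--- `build` action that lifts `Build` into `Session` (imports as in the
--- reduction sketch, plus `Data.Int (Int64)`):
-{-
-rollsExample :: IO (Vector Int64)
-rollsExample = TF.runSession $ do
-    rolls <- TF.build $ CoreOps.randomUniformInt
-                 (TF.vector [8 :: Int32])   -- shape: 8 draws
-                 (TF.scalar (0 :: Int64))   -- inclusive lower bound
-                 (TF.scalar (6 :: Int64))   -- exclusive upper bound
-    TF.run rolls
--}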
-
--- | Computes the complementary error function of `x` element-wise.
-
-erfc :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-erfc x | eqLengthGuard [] =
-    buildOp (opDef "Erfc"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes Psi, the derivative of Lgamma (the log of the absolute value of
--- `Gamma(x)`), element-wise.
-digamma :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                Float] t) =>
-           Tensor v1 t -- ^ __x__
-           -> Tensor Value t -- ^ __y__
-digamma x | eqLengthGuard [] =
-    buildOp (opDef "Digamma"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Performs a resize and padding as a preprocess during a convolution.
---
--- It's often possible to do spatial transformations more efficiently as part of
--- the packing stage of a convolution, so this op allows for an optimized
--- implementation where these stages are fused together. This prevents the need to
--- write out the intermediate results as whole tensors, reducing memory pressure,
--- and we can get some latency gains by merging the transformation calculations.
--- The data_format attribute for Conv2D isn't supported by this op, and defaults to
--- 'NHWC' order.
--- Internally this op uses a single per-graph scratch buffer, which means that it
--- will block if multiple versions are being run in parallel. This is because this
--- operator is primarily an optimization to minimize memory usage.
-fusedResizeAndPadConv2D :: forall v1 v2 v3 v4 t . (TensorType t,
-                                                   OneOf '[Data.Word.Word16,
-                                                           Double, Float] t) =>
-                           Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
-                           -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
-                                                       -- new size for the images.
-                           -> Tensor v3 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
-                                                       -- rows must be the same as the rank of `input`.
-                           -> Tensor v4 t -- ^ __filter__: 4-D with shape
-                                          -- `[filter_height, filter_width, in_channels, out_channels]`.
-                           -> Tensor Value t -- ^ __output__
-fusedResizeAndPadConv2D input size paddings filter | eqLengthGuard [] =
-    buildOp (opDef "FusedResizeAndPadConv2D"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input size paddings filter
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, rescale input by (new_height - 1) / (height - 1),\nwhich exactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
-  name: "resize_align_corners"
-  type: "bool"
-}
-attr {
-  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
-  name: "mode"
-  type: "string"
-}
-attr {
-  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
-  name: "size"
-  type: DT_INT32
-}
-input_arg {
-  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
-  name: "paddings"
-  type: DT_INT32
-}
-input_arg {
-  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
-  name: "filter"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Returns x - y element-wise.
---
--- *NOTE*: `Sub` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-sub :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.Int.Int32, Data.Int.Int64,
-                                               Data.Word.Word16, Double,
-                                               Float] t) =>
-       Tensor v1 t -- ^ __x__
-       -> Tensor v2 t -- ^ __y__
-       -> Tensor Value t -- ^ __z__
-sub x y | eqLengthGuard [] =
-    buildOp (opDef "Sub"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Returns an element-wise indication of the sign of a number.
---
--- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
--- 
--- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
-sign :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Int.Int32, Data.Int.Int64,
-                                             Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-sign x | eqLengthGuard [] =
-    buildOp (opDef "Sign"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes the log of the absolute value of `Gamma(x)` element-wise.
-
-lgamma :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                               Float] t) =>
-          Tensor v1 t -- ^ __x__
-          -> Tensor Value t -- ^ __y__
-lgamma x | eqLengthGuard [] =
-    buildOp (opDef "Lgamma"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes natural logarithm of x element-wise.
---
--- I.e., \\(y = \log_e x\\).
-log :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-log x | eqLengthGuard [] =
-    buildOp (opDef "Log"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes exponential of x element-wise.  \\(y = e^x\\).
-
-exp :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-exp x | eqLengthGuard [] =
-    buildOp (opDef "Exp"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
---
--- The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
--- `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
--- input channel is processed independently of the others with its own structuring
--- function. The `output` tensor has shape
--- `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
--- tensor depend on the `padding` algorithm. We currently only support the default
--- "NHWC" `data_format`.
--- 
--- In detail, the grayscale morphological 2-D dilation is the max-sum correlation
--- (for consistency with `conv2d`, we use unmirrored filters):
--- 
---     output[b, y, x, c] =
---        max_{dy, dx} input[b,
---                           strides[1] * y + rates[1] * dy,
---                           strides[2] * x + rates[2] * dx,
---                           c] +
---                     filter[dy, dx, c]
--- 
--- Max-pooling is a special case when the filter has size equal to the pooling
--- kernel size and contains all zeros.
--- 
--- Note on duality: The dilation of `input` by the `filter` is equal to the
--- negation of the erosion of `-input` by the reflected `filter`.
-dilation2D :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                      Data.Int.Int32,
-                                                      Data.Int.Int64,
-                                                      Data.Int.Int8,
-                                                      Data.Word.Word16,
-                                                      Data.Word.Word8, Double,
-                                                      Float] t) =>
-              Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
-              -> Tensor v2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
-              -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, out_height, out_width, depth]`.
-dilation2D input filter | eqLengthGuard [] =
-    buildOp (opDef "Dilation2D"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\ntensor. Must be: `[1, stride_height, stride_width, 1]`."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  description: "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`."
-  has_minimum: true
-  minimum: 4
-  name: "rates"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, in_height, in_width, depth]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "3-D with shape `[filter_height, filter_width, depth]`."
-  name: "filter"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D with shape `[batch, out_height, out_width, depth]`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
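--- /Example (not part of the generated bindings)./ A plain-list sketch of
--- the max-sum correlation above, specialized to one channel, stride 1,
--- rate 1, and VALID padding; this is only an illustration of the formula,
--- not the op's implementation:
---
--- > dilate1D :: [Double] -> [Double] -> [Double]
--- > dilate1D input filt =
--- >     [ maximum [ input !! (y + dy) + filt !! dy
--- >               | dy <- [0 .. length filt - 1] ]
--- >     | y <- [0 .. length input - length filt] ]
---
--- For instance, `dilate1D [0, 1, 2, 3] [0, 10]` evaluates to `[11, 12, 13]`.
-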
--- | Computes the gradient for the rsqrt of `x` wrt its input.
---
--- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
--- is the corresponding input gradient.
-rsqrtGrad :: forall v1 v2 t . (TensorType t,
-                               OneOf '[(Data.Complex.Complex Double),
-                                       (Data.Complex.Complex Float),
-                                       Data.Word.Word16, Double, Float] t) =>
-             Tensor v1 t -- ^ __x__
-             -> Tensor v2 t -- ^ __y__
-             -> Tensor Value t -- ^ __z__
-rsqrtGrad x y | eqLengthGuard [] =
-    buildOp (opDef "RsqrtGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
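--- A note on the `RsqrtGrad` formula above: it is the power rule applied to
--- \\(y = x^{-1/2}\\), giving \\(dy/dx = -\frac{1}{2} x^{-3/2} = -0.5 y^3\\),
--- which is where the `dy * -0.5 * y^3` backprop comes from.
-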
--- | Computes reciprocal of square root of x element-wise.
---
--- I.e., \\(y = 1 / \sqrt{x}\\).
-rsqrt :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                              (Data.Complex.Complex Float),
-                                              Data.Word.Word16, Double,
-                                              Float] t) =>
-         Tensor v1 t -- ^ __x__
-         -> Tensor Value t -- ^ __y__
-rsqrt x | eqLengthGuard [] =
-    buildOp (opDef "Rsqrt"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Produces the max pool of the input tensor for quantized types.
-
-quantizedMaxPool :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                               Data.Int.Int32,
-                                                               Data.Word.Word16,
-                                                               Data.Word.Word8] t) =>
-                    Tensor v1 t -- ^ __input__: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
-                    -> Tensor v2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
-                    -> Tensor v3 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
-                    -> (Tensor Value t, Tensor Value Float, Tensor Value Float)
-                    -- ^ (__output__, __min_output__, __max_output__)
-                    --
-                    -- * __output__
-                    --
-                    -- * __min_output__: The float value that the lowest quantized output value represents.
-                    --
-                    -- * __max_output__: The float value that the highest quantized output value represents.
-quantizedMaxPool input min_input max_input | eqLengthGuard [] =
-    buildOp (opDef "QuantizedMaxPool"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input min_input max_input
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input."
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "The 4D (batch x rows x cols x depth) Tensor to MaxReduce over."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "The float value that the lowest quantized input value represents."
-  name: "min_input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized input value represents."
-  name: "max_input"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type_attr: "T" }
-output_arg {
-  description: "The float value that the lowest quantized output value represents."
-  name: "min_output"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized output value represents."
-  name: "max_output"
-  type: DT_FLOAT
-}
--}
-
--- | Computes square root of x element-wise.
---
--- I.e., \\(y = \sqrt{x} = x^{1/2}\\).
-sqrt :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-sqrt x | eqLengthGuard [] =
-    buildOp (opDef "Sqrt"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | A Reader that outputs the queued work as both the key and value.
---
--- To use, enqueue strings in a Queue.  ReaderRead will take the front
--- work string and output (work, work).
-identityReader :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
-identityReader  | eqLengthGuard [] =
-    buildOp (opDef "IdentityReader")
-        
-{-
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to reference the Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
--}
-
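--- /Example (not part of the generated bindings)./ A sketch of creating the
--- reader inside a session; it assumes `build`, `Session`, `Tensor`, and
--- `Ref` are re-exported by TensorFlow.Core:
---
--- > import qualified Data.ByteString as B
--- > import TensorFlow.Core (Ref, Session, Tensor, build)
--- >
--- > makeReader :: Session (Tensor Ref B.ByteString)
--- > makeReader = build identityReader
-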
--- | Computes square of x element-wise.
---
--- I.e., \\(y = x * x = x^2\\).
-square :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.Int.Int32, Data.Int.Int64,
-                                               Data.Word.Word16, Double,
-                                               Float] t) =>
-          Tensor v1 t -- ^ __x__
-          -> Tensor Value t -- ^ __y__
-square x | eqLengthGuard [] =
-    buildOp (opDef "Square"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Reshapes a quantized tensor as per the Reshape op.
-quantizedReshape :: forall v1 v2 v3 v4 t tshape . (TensorType t,
-                                                   TensorType tshape,
-                                                   OneOf '[Data.Int.Int32,
-                                                           Data.Int.Int64] tshape) =>
-                    Tensor v1 t -- ^ __tensor__
-                    -> Tensor v2 tshape -- ^ __shape__: Defines the shape of the output tensor.
-                    -> Tensor v3 Float -- ^ __input_min__: The minimum value of the input.
-                    -> Tensor v4 Float -- ^ __input_max__: The maximum value of the input.
-                    -> (Tensor Value t, Tensor Value Float, Tensor Value Float)
-                    -- ^ (__output__, __output_min__, __output_max__)
-                    --
-                    -- * __output__
-                    --
-                    -- * __output_min__: This value is copied from input_min.
-                    --
-                    -- * __output_max__: This value is copied from input_max.
-quantizedReshape tensor shape input_min input_max | eqLengthGuard [] =
-    buildOp (opDef "QuantizedReshape"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tshape" .~ tensorType (undefined :: tshape))
-        tensor shape input_min input_max
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tshape"
-  type: "type"
-}
-input_arg { name: "tensor" type_attr: "T" }
-input_arg {
-  description: "Defines the shape of the output tensor."
-  name: "shape"
-  type_attr: "Tshape"
-}
-input_arg {
-  description: "The minimum value of the input."
-  name: "input_min"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The maximum value of the input."
-  name: "input_max"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type_attr: "T" }
-output_arg {
-  description: "This value is copied from input_min."
-  name: "output_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "This value is copied from input_max."
-  name: "output_max"
-  type: DT_FLOAT
-}
--}
-
--- | Computes the gradient for the inverse of `x` wrt its input.
---
--- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
--- is the corresponding input gradient.
-reciprocalGrad :: forall v1 v2 t . (TensorType t,
-                                    OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-                  -> Tensor v2 t -- ^ __y__
-                  -> Tensor Value t -- ^ __z__
-reciprocalGrad x y | eqLengthGuard [] =
-    buildOp (opDef "ReciprocalGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
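--- A note on the `ReciprocalGrad` formula above: with \\(y = x^{-1}\\) the
--- power rule gives \\(dy/dx = -x^{-2} = -y^2\\), so the backprop is
--- `-dy * y*y`.
-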
--- | Computes the gradient for the inverse of `x` wrt its input.
---
--- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
--- is the corresponding input gradient.
-invGrad :: forall v1 v2 t . (TensorType t,
-                             OneOf '[(Data.Complex.Complex Double),
-                                     (Data.Complex.Complex Float),
-                                     Data.Word.Word16, Double, Float] t) =>
-           Tensor v1 t -- ^ __x__
-           -> Tensor v2 t -- ^ __y__
-           -> Tensor Value t -- ^ __z__
-invGrad x y | eqLengthGuard [] =
-    buildOp (opDef "InvGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Computes the reciprocal of x element-wise.
---
--- I.e., \\(y = 1 / x\\).
-inv :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Int.Int32, Data.Int.Int64,
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-inv x | eqLengthGuard [] =
-    buildOp (opDef "Inv"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Concat the elements from the TensorArray into value `value`.
---
--- Takes `T` elements of shapes
--- 
---   ```
---   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
---   ```
--- 
--- and concatenates them into a Tensor of shape:
--- 
---   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
--- 
--- All elements must have the same shape (excepting the first dimension).
-tensorArrayConcatV2 :: forall v1 v2 dtype . (TensorType dtype) =>
-                       Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
-                       -> Tensor v2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                       -> (Tensor Value dtype, Tensor Value Data.Int.Int64)
-                       -- ^ (__value__, __lengths__)
-                       --
-                       -- * __value__: All of the elements in the TensorArray, concatenated along the first
-                       -- axis.
-                       --
-                       -- * __lengths__: A vector of the row sizes of the original T elements in the
-                       -- value output.  In the example above, this would be the values:
-                       -- `(n1, n2, ..., n(T-1))`.
-tensorArrayConcatV2 handle flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayConcatV2"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle flow_in
-{-
-attr {
-  description: "The type of the elem that is returned."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  default_value { shape { unknown_rank: true } }
-  description: "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error."
-  name: "element_shape_except0"
-  type: "shape"
-}
-input_arg {
-  description: "The handle to a TensorArray."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "All of the elements in the TensorArray, concatenated along the first\naxis."
-  name: "value"
-  type_attr: "dtype"
-}
-output_arg {
-  description: "A vector of the row sizes of the original T elements in the\nvalue output.  In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`."
-  name: "lengths"
-  type: DT_INT64
-}
--}
-
--- | Computes the complex absolute value of a tensor.
---
--- Given a tensor `x` of complex numbers, this operation returns a tensor of type
--- `float` or `double` that is the absolute value of each element in `x`. All
--- elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
--- value is computed as \\( \sqrt{a^2 + b^2}\\).
--- 
--- For example:
--- 
--- ```
--- # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
--- tf.complex_abs(x) ==> [5.25594902, 6.60492229]
--- ```
-complexAbs :: forall v1 t tout . (TensorType t,
-                                  OneOf '[(Data.Complex.Complex Double),
-                                          (Data.Complex.Complex Float)] t,
-                                  TensorType tout, OneOf '[Double,
-                                                           Float] tout) =>
-              Tensor v1 t -- ^ __x__
-              -> Tensor Value tout -- ^ __y__
-complexAbs x | eqLengthGuard [] =
-    buildOp (opDef "ComplexAbs"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tout" .~ tensorType (undefined :: tout))
-        x
-{-
-attr {
-  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
-  default_value { type: DT_COMPLEX64 }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  default_value { type: DT_FLOAT }
-  name: "Tout"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "Tout" }
--}
-
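--- /Example (not part of the generated bindings)./ A sketch reproducing the
--- numbers above; it assumes `constant` from TensorFlow.Ops accepts
--- `Data.Complex.Complex Float` elements, plus `runSession`, `run`, and
--- `Shape` from TensorFlow.Core:
---
--- > import Data.Complex (Complex ((:+)))
--- > import qualified Data.Vector as V
--- > import TensorFlow.Core (Shape (..), run, runSession)
--- > import TensorFlow.Ops (constant)
--- >
--- > complexAbsExample :: IO (V.Vector Float)
--- > complexAbsExample = runSession $ do
--- >     let x = constant (Shape [2]) [(-2.25) :+ 4.75, (-3.25) :+ 5.75 :: Complex Float]
--- >     run (complexAbs x)  -- expected: [5.25594902, 6.60492229]
-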
--- | Cast x of type SrcT to y of DstT.
---
--- _HostCast requires its input and produces its output in host memory.
-_HostCast :: forall v1 srcT dstT . (TensorType srcT, TensorType dstT) =>
-             Tensor v1 srcT -- ^ __x__
-             -> Tensor Value dstT -- ^ __y__
-_HostCast x | eqLengthGuard [] =
-    buildOp (opDef "_HostCast"
-             & opAttr "SrcT" .~ tensorType (undefined :: srcT)
-             & opAttr "DstT" .~ tensorType (undefined :: dstT))
-        x
-{-
-attr { name: "SrcT" type: "type" }
-attr { name: "DstT" type: "type" }
-input_arg { name: "x" type_attr: "SrcT" }
-output_arg { name: "y" type_attr: "DstT" }
--}
-
--- | Resize `images` to `size` using nearest neighbor interpolation.
-
-resizeNearestNeighbor :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                                 Data.Int.Int32,
-                                                                 Data.Int.Int64,
-                                                                 Data.Int.Int8,
-                                                                 Data.Word.Word16,
-                                                                 Data.Word.Word8,
-                                                                 Double,
-                                                                 Float] t) =>
-                         Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
-                         -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
-                                                     -- new size for the images.
-                         -> Tensor Value t -- ^ __resized_images__: 4-D with shape
-                         -- `[batch, new_height, new_width, channels]`.
-resizeNearestNeighbor images size | eqLengthGuard [] =
-    buildOp (opDef "ResizeNearestNeighbor"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images size
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
-  name: "align_corners"
-  type: "bool"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "images"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
-  name: "size"
-  type: DT_INT32
-}
-output_arg {
-  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
-  name: "resized_images"
-  type_attr: "T"
-}
--}
-
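--- /Example (not part of the generated bindings)./ A sketch that doubles a
--- 2x2 single-channel image with nearest-neighbor interpolation, assuming
--- `constant` from TensorFlow.Ops and `runSession`, `run`, and `Shape` from
--- TensorFlow.Core:
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import TensorFlow.Core (Shape (..), run, runSession)
--- > import TensorFlow.Ops (constant)
--- >
--- > resizeExample :: IO (V.Vector Float)
--- > resizeExample = runSession $ do
--- >     let images = constant (Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float]
--- >         newSize = constant (Shape [2]) [4, 4 :: Int32]
--- >     run (resizeNearestNeighbor images newSize)
-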
--- | Deprecated. Disallowed in GraphDef version >= 2.
-
-adjustContrast :: forall v1 v2 v3 v4 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                                Data.Int.Int32,
-                                                                Data.Int.Int64,
-                                                                Data.Int.Int8,
-                                                                Data.Word.Word8,
-                                                                Double,
-                                                                Float] t) =>
-                  Tensor v1 t -- ^ __images__
-                  -> Tensor v2 Float -- ^ __contrast_factor__
-                  -> Tensor v3 Float -- ^ __min_value__
-                  -> Tensor v4 Float -- ^ __max_value__
-                  -> Tensor Value Float -- ^ __output__
-adjustContrast images contrast_factor min_value max_value | eqLengthGuard [] =
-    buildOp (opDef "AdjustContrast"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images contrast_factor min_value max_value
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "images" type_attr: "T" }
-input_arg { name: "contrast_factor" type: DT_FLOAT }
-input_arg { name: "min_value" type: DT_FLOAT }
-input_arg { name: "max_value" type: DT_FLOAT }
-output_arg { name: "output" type: DT_FLOAT }
--}
-
--- | 
-
-batchMatrixDiagPart :: forall v1 t . (TensorType t) =>
-                       Tensor v1 t -- ^ __input__
-                       -> Tensor Value t -- ^ __diagonal__
-batchMatrixDiagPart input | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixDiagPart"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "diagonal" type_attr: "T" }
--}
-
--- | 
-
-batchMatrixSetDiag :: forall v1 v2 t . (TensorType t) =>
-                      Tensor v1 t -- ^ __input__
-                      -> Tensor v2 t -- ^ __diagonal__
-                      -> Tensor Value t -- ^ __output__
-batchMatrixSetDiag input diagonal | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixSetDiag"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input diagonal
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "input" type_attr: "T" }
-input_arg { name: "diagonal" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | 
-
-batchMatrixDiag :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __diagonal__
-                   -> Tensor Value t -- ^ __output__
-batchMatrixDiag diagonal | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixDiag"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        diagonal
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "diagonal" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
-
-fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
-                                                             -- shape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`.
-                                             -> Tensor v2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
-                                                                --   same as `gradients`.
-                                             -> Tensor v3 Float -- ^ __min__: Quantization interval lower bound; floats of shape `[d]`.
-                                             -> Tensor v4 Float -- ^ __max__: Quantization interval upper bound; floats of shape `[d]`.
-                                             -> (Tensor Value Float,
-                                                 Tensor Value Float,
-                                                 Tensor Value Float)
-                                             -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)
-                                             --
-                                             -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs, shape same as
-                                             -- `inputs`:
-                                             --   `gradients * (inputs >= min && inputs <= max)`.
-                                             --
-                                             -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
-                                             -- `sum_per_d(gradients * (inputs < min))`.
-                                             --
-                                             -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
-                                             -- `sum_per_d(gradients * (inputs > max))`.
-fakeQuantWithMinMaxVarsPerChannelGradient gradients inputs min
-                                          max | eqLengthGuard [] =
-    buildOp (opDef "FakeQuantWithMinMaxVarsPerChannelGradient")
-        gradients inputs min max
-{-
-input_arg {
-  description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\nshape one of: `[d]`, `[b, d]`,  `[b, h, w, d]`."
-  name: "gradients"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\n  same as `gradients`.\nmin, max: Quantization interval, floats of shape `[d]`."
-  name: "inputs"
-  type: DT_FLOAT
-}
-input_arg { name: "min" type: DT_FLOAT }
-input_arg { name: "max" type: DT_FLOAT }
-output_arg {
-  description: "Backpropagated gradients w.r.t. inputs, shape same as\n`inputs`:\n  `gradients * (inputs >= min && inputs <= max)`."
-  name: "backprops_wrt_input"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Backpropagated gradients w.r.t. min parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs < min))`."
-  name: "backprop_wrt_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs > max))`."
-  name: "backprop_wrt_max"
-  type: DT_FLOAT
-}
--}
-
--- | Computes gradients for SparseSegmentSqrtN.
---
--- Returns tensor "output" with same shape as grad, except for dimension 0 whose
--- value is output_dim0.
-sparseSegmentSqrtNGrad :: forall v1 v2 v3 v4 t tidx . (TensorType t,
-                                                       OneOf '[Double, Float] t,
-                                                       TensorType tidx,
-                                                       OneOf '[Data.Int.Int32,
-                                                               Data.Int.Int64] tidx) =>
-                          Tensor v1 t -- ^ __grad__: gradient propagated to the SparseSegmentSqrtN op.
-                          -> Tensor v2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentSqrtN op.
-                          -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentSqrtN op.
-                          -> Tensor v4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentSqrtN op.
-                          -> Tensor Value t -- ^ __output__
-sparseSegmentSqrtNGrad grad indices segment_ids output_dim0 | eqLengthGuard [] =
-    buildOp (opDef "SparseSegmentSqrtNGrad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        grad indices segment_ids output_dim0
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "gradient propagated to the SparseSegmentSqrtN op."
-  name: "grad"
-  type_attr: "T"
-}
-input_arg {
-  description: "indices passed to the corresponding SparseSegmentSqrtN op."
-  name: "indices"
-  type_attr: "Tidx"
-}
-input_arg {
-  description: "segment_ids passed to the corresponding SparseSegmentSqrtN op."
-  name: "segment_ids"
-  type: DT_INT32
-}
-input_arg {
-  description: "dimension 0 of \"data\" passed to SparseSegmentSqrtN op."
-  name: "output_dim0"
-  type: DT_INT32
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
---
--- `[b, d]`, `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
--- to 'outputs' tensor of same shape as `inputs`.
--- 
--- [min; max] is the clamping range for the 'inputs' data in the corresponding
--- depth channel.  Op divides this range into 255 steps (total of 256 values), then
--- replaces each 'inputs' value with the closest of the quantized step values.
--- 
--- This operation has a gradient and thus allows for training `min` and `max` values.
-fakeQuantWithMinMaxVarsPerChannel :: Tensor v1 Float -- ^ __inputs__
-                                     -> Tensor v2 Float -- ^ __min__
-                                     -> Tensor v3 Float -- ^ __max__
-                                     -> Tensor Value Float -- ^ __outputs__
-fakeQuantWithMinMaxVarsPerChannel inputs min max | eqLengthGuard [] =
-    buildOp (opDef "FakeQuantWithMinMaxVarsPerChannel")
-        inputs min max
-{-
-input_arg { name: "inputs" type: DT_FLOAT }
-input_arg { name: "min" type: DT_FLOAT }
-input_arg { name: "max" type: DT_FLOAT }
-output_arg { name: "outputs" type: DT_FLOAT }
--}
-
--- | Outputs a `Summary` protocol buffer with scalar values.
---
--- The input `tags` and `values` must have the same shape.  The generated summary
--- has a summary value for each tag-value pair in `tags` and `values`.
-scalarSummary :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Int.Int64,
-                                                         Data.Int.Int8,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8,
-                                                         Double, Float] t) =>
-                 Tensor v1 Data.ByteString.ByteString -- ^ __tags__: Tags for the summary.
-                 -> Tensor v2 t -- ^ __values__: Same shape as `tags`.  Values for the summary.
-                 -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar.  Serialized `Summary` protocol buffer.
-scalarSummary tags values | eqLengthGuard [] =
-    buildOp (opDef "ScalarSummary"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        tags values
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Tags for the summary." name: "tags" type: DT_STRING
-}
-input_arg {
-  description: "Same shape as `tags`.  Values for the summary."
-  name: "values"
-  type_attr: "T"
-}
-output_arg {
-  description: "Scalar.  Serialized `Summary` protocol buffer."
-  name: "summary"
-  type: DT_STRING
-}
--}
-
--- | Computes numerical negative value element-wise.
---
--- I.e., \\(y = -x\\).
-neg :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Int.Int32, Data.Int.Int64,
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-neg x | eqLengthGuard [] =
-    buildOp (opDef "Neg"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Compute gradients for a FakeQuantWithMinMaxArgs operation.
-
-fakeQuantWithMinMaxArgsGradient :: Tensor v1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
-                                   -> Tensor v2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
-                                   -> Tensor Value Float -- ^ __backprops__: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
-                                   -- `gradients * (inputs >= min && inputs <= max)`.
-fakeQuantWithMinMaxArgsGradient gradients inputs | eqLengthGuard [] =
-    buildOp (opDef "FakeQuantWithMinMaxArgsGradient")
-        gradients inputs
-{-
-attr { default_value { f: -6.0 } name: "min" type: "float" }
-attr { default_value { f: 6.0 } name: "max" type: "float" }
-input_arg {
-  description: "Backpropagated gradients above the FakeQuantWithMinMaxArgs operation."
-  name: "gradients"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "Values passed as inputs to the FakeQuantWithMinMaxArgs operation."
-  name: "inputs"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:\n`gradients * (inputs >= min && inputs <= max)`."
-  name: "backprops"
-  type: DT_FLOAT
-}
--}
-
--- | Debug NaN Value Counter Op
---
--- Counts number of NaNs in the input tensor, for debugging.
-debugNanCount :: forall v1 t . (TensorType t) =>
-                 Tensor v1 t -- ^ __input__: Input tensor, non-Reference type.
-                 -> Tensor Value Data.Int.Int64 -- ^ __output__: An integer output tensor that is the number of NaNs in the input.
-debugNanCount input | eqLengthGuard [] =
-    buildOp (opDef "DebugNanCount"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { s: "" }
-  description: "Name of the input tensor."
-  name: "tensor_name"
-  type: "string"
-}
-attr {
-  default_value { list { } }
-  description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc://localhost:11011"
-  name: "debug_urls"
-  type: "list(string)"
-}
-input_arg {
-  description: "Input tensor, non-Reference type."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "An integer output tensor that is the number of NaNs in the input."
-  name: "output"
-  type: DT_INT64
-}
--}
-
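--- /Example (not part of the generated bindings)./ A sketch that counts the
--- single NaN in a three-element tensor, with the same TensorFlow.Core and
--- TensorFlow.Ops imports as the earlier sketches:
---
--- > import Data.Int (Int64)
--- > import qualified Data.Vector as V
--- > import TensorFlow.Core (Shape (..), run, runSession)
--- > import TensorFlow.Ops (constant)
--- >
--- > nanCountExample :: IO (V.Vector Int64)
--- > nanCountExample = runSession $ do
--- >     let x = constant (Shape [3]) [1, 0 / 0, 3 :: Float]  -- 0/0 is NaN
--- >     run (debugNanCount x)  -- expected: [1]
-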
--- | Debug Identity Op.
---
--- Provides an identity mapping of the non-Ref type input tensor for debugging.
-debugIdentity :: forall v1 t . (TensorType t) =>
-                 Tensor v1 t -- ^ __input__: Input tensor, non-Reference type.
-                 -> Tensor Value t -- ^ __output__: Output tensor that equals the input tensor.
-debugIdentity input | eqLengthGuard [] =
-    buildOp (opDef "DebugIdentity"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { s: "" }
-  description: "Name of the input tensor."
-  name: "tensor_name"
-  type: "string"
-}
-attr {
-  default_value { list { } }
-  description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc://localhost:11011"
-  name: "debug_urls"
-  type: "list(string)"
-}
-input_arg {
-  description: "Input tensor, non-Reference type."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "Output tensor that equals the input tensor."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Bitcasts a tensor from one type to another without copying data.
---
--- Given a tensor `input`, this operation returns a tensor that has the same buffer
--- data as `input` with datatype `type`.
--- 
--- If the input datatype `T` is larger than the output datatype `type` then the
--- shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
--- 
--- If `T` is smaller than `type`, the operator requires that the rightmost
--- dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
--- [..., sizeof(`type`)/sizeof(`T`)] to [...].
--- 
--- *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
--- endian orderings will give different results.
-bitcast :: forall v1 t type' . (TensorType t,
-                                OneOf '[(Data.Complex.Complex Double),
-                                        (Data.Complex.Complex Float),
-                                        Data.Int.Int16, Data.Int.Int32,
-                                        Data.Int.Int64, Data.Int.Int8,
-                                        Data.Word.Word16, Data.Word.Word8,
-                                        Double, Float] t, TensorType type',
-                                OneOf '[(Data.Complex.Complex Double),
-                                        (Data.Complex.Complex Float),
-                                        Data.Int.Int16, Data.Int.Int32,
-                                        Data.Int.Int64, Data.Int.Int8,
-                                        Data.Word.Word16, Data.Word.Word8,
-                                        Double, Float] type') =>
-           Tensor v1 t -- ^ __input__
-           -> Tensor Value type' -- ^ __output__
-bitcast input | eqLengthGuard [] =
-    buildOp (opDef "Bitcast"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "type" .~ tensorType (undefined :: type'))
-        input
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "type"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "type" }
--}
-
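--- /Example (not part of the generated bindings)./ The shape rule above in a
--- concrete case: casting `Float` (4 bytes) to `Data.Word.Word8` (1 byte)
--- appends a trailing dimension of size 4, so a `[2, 3]` input yields a
--- `[2, 3, 4]` output. A type signature is enough to pick the target type:
---
--- > import Data.Word (Word8)
--- >
--- > floatBytes :: Tensor v Float -> Tensor Value Word8
--- > floatBytes = bitcast
-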
--- | Computes sigmoid of `x` element-wise.
---
--- Specifically, `y = 1 / (1 + exp(-x))`.
-sigmoid :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                (Data.Complex.Complex Float),
-                                                Data.Word.Word16, Double,
-                                                Float] t) =>
-           Tensor v1 t -- ^ __x__
-           -> Tensor Value t -- ^ __y__
-sigmoid x | eqLengthGuard [] =
-    buildOp (opDef "Sigmoid"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Copy Op.
---
--- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
--- device on which the tensor is allocated.
--- 
--- Unlike the CopyHost Op, this op does not have HostMemory constraint on its
--- input or output.
-copy :: forall v1 t . (TensorType t) =>
-        Tensor v1 t -- ^ __input__: Input tensor.
-        -> Tensor Value t -- ^ __output__: Output tensor, deep-copied from input.
-copy input | eqLengthGuard [] =
-    buildOp (opDef "Copy"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { s: "" }
-  description: "The name of the input tensor."
-  name: "tensor_name"
-  type: "string"
-}
-input_arg {
-  description: "Input tensor." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "Output tensor, deep-copied from input."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Generates labels for candidate sampling with a learned unigram distribution.
---
--- A unigram sampler could use a fixed unigram distribution read from a
--- file or passed in as an in-memory array instead of building up the distribution
--- from data on the fly. There is also an option to skew the distribution by
--- applying a distortion power to the weights.
--- 
--- The vocabulary file should be in CSV-like format, with the last field
--- being the weight associated with the word.
--- 
--- For each batch, this op picks a single set of sampled candidate labels.
--- 
--- The advantages of sampling candidates per-batch are simplicity and the
--- possibility of efficient dense matrix multiplication. The disadvantage is that
--- the sampled candidates must be chosen independently of the context and of the
--- true labels.
-fixedUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
-                                -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
-                                -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
-                                -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
-                                        -- candidates in a batch are unique. This requires some approximation to
-                                        -- estimate the post-rejection sampling probabilities.
-                                -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
-                                                            -- IDs of the num_true target_classes in the corresponding original label.
-                                -> (Tensor Value Data.Int.Int64,
-                                    Tensor Value Float, Tensor Value Float)
-                                -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
-                                --
-                                -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
-                                -- the ID of a sampled candidate.
-                                --
-                                -- * __true_expected_count__: A batch_size * num_true matrix, representing
-                                -- the number of times each candidate is expected to occur in a batch
-                                -- of sampled candidates. If unique=true, then this is a probability.
-                                --
-                                -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
-                                -- candidate representing the number of times the candidate is expected
-                                -- to occur in a batch of sampled candidates.  If unique=true, then this is a
-                                -- probability.
-fixedUnigramCandidateSampler num_sampled num_true range_max unique
-                             true_classes | eqLengthGuard [] =
-    buildOp (opDef "FixedUnigramCandidateSampler"
-             & opAttr "num_sampled" .~ num_sampled
-             & opAttr "num_true" .~ num_true
-             & opAttr "range_max" .~ range_max
-             & opAttr "unique" .~ unique)
-        true_classes
-{-
-attr {
-  description: "Number of true labels per context."
-  has_minimum: true
-  minimum: 1
-  name: "num_true"
-  type: "int"
-}
-attr {
-  description: "Number of candidates to randomly sample per batch."
-  has_minimum: true
-  minimum: 1
-  name: "num_sampled"
-  type: "int"
-}
-attr {
-  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
-  name: "unique"
-  type: "bool"
-}
-attr {
-  description: "The sampler will sample integers from the interval [0, range_max)."
-  has_minimum: true
-  minimum: 1
-  name: "range_max"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op."
-  name: "vocab_file"
-  type: "string"
-}
-attr {
-  default_value { f: 1.0 }
-  description: "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion\'s power before adding to the\ninternal unigram distribution. As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution."
-  name: "distortion"
-  type: "float"
-}
-attr {
-  default_value { i: 0 }
-  description: "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0."
-  name: "num_reserved_ids"
-  type: "int"
-}
-attr {
-  default_value { i: 1 }
-  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'shard\') indicates the number of partitions that are being\nused in the overall computation."
-  has_minimum: true
-  minimum: 1
-  name: "num_shards"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'num_shards\') indicates the particular partition number of a\nsampler op, when partitioning is being used."
-  has_minimum: true
-  name: "shard"
-  type: "int"
-}
-attr {
-  default_value { list { } }
-  description: "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op."
-  name: "unigrams"
-  type: "list(float)"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
-  name: "true_classes"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
-  name: "sampled_candidates"
-  type: DT_INT64
-}
-output_arg {
-  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
-  name: "true_expected_count"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
-  name: "sampled_expected_count"
-  type: DT_FLOAT
-}
--}
-
--- | Computes the difference between two lists of numbers or strings.
---
--- Given a list `x` and a list `y`, this operation returns a list `out` that
--- represents all values that are in `x` but not in `y`. The returned list `out`
--- is sorted in the same order that the numbers appear in `x` (duplicates are
--- preserved). This operation also returns a list `idx` that represents the
--- position of each `out` element in `x`. In other words:
--- 
--- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
--- 
--- For example, given this input:
--- 
--- ```prettyprint
--- x = [1, 2, 3, 4, 5, 6]
--- y = [1, 3, 5]
--- ```
--- 
--- This operation would return:
--- 
--- ```prettyprint
--- out ==> [2, 4, 6]
--- idx ==> [1, 3, 5]
--- ```
-listDiff :: forall v1 v2 t out_idx . (TensorType t, TensorType out_idx,
-                                      OneOf '[Data.Int.Int32,
-                                              Data.Int.Int64] out_idx) =>
-            Tensor v1 t -- ^ __x__: 1-D. Values to keep.
-            -> Tensor v2 t -- ^ __y__: 1-D. Values to remove.
-            -> (Tensor Value t, Tensor Value out_idx) -- ^ (__out__, __idx__)
-            --
-            -- * __out__: 1-D. Values present in `x` but not in `y`.
-            --
-            -- * __idx__: 1-D. Positions of `x` values preserved in `out`.
-listDiff x y | eqLengthGuard [] =
-    buildOp (opDef "ListDiff"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "out_idx" .~ tensorType (undefined :: out_idx))
-        x y
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "out_idx"
-  type: "type"
-}
-input_arg {
-  description: "1-D. Values to keep." name: "x" type_attr: "T"
-}
-input_arg {
-  description: "1-D. Values to remove." name: "y" type_attr: "T"
-}
-output_arg {
-  description: "1-D. Values present in `x` but not in `y`."
-  name: "out"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D. Positions of `x` values preserved in `out`."
-  name: "idx"
-  type_attr: "out_idx"
-}
--}
-
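--- /Example (not part of the generated bindings)./ A sketch reproducing the
--- numbers above, assuming `constant` from TensorFlow.Ops and `runSession`,
--- `run`, and `Shape` from TensorFlow.Core; a pair of tensors is fetched as
--- a pair of vectors:
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import TensorFlow.Core (Shape (..), run, runSession)
--- > import TensorFlow.Ops (constant)
--- >
--- > listDiffExample :: IO (V.Vector Int32, V.Vector Int32)
--- > listDiffExample = runSession $ do
--- >     let xs = constant (Shape [6]) [1, 2, 3, 4, 5, 6 :: Int32]
--- >         ys = constant (Shape [3]) [1, 3, 5]
--- >     run (listDiff xs ys)  -- expected: ([2,4,6], [1,3,5])
-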
--- | Extract `patches` from `images` and put them in the "depth" output dimension.
-
-extractImagePatches :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Data.Int.Int8,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8,
-                                                            Double, Float] t) =>
-                       Tensor v1 t -- ^ __images__: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
-                       -> Tensor Value t -- ^ __patches__: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
-                       -- ksize_cols * depth]` containing image patches with size
-                       -- `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.
-extractImagePatches images | eqLengthGuard [] =
-    buildOp (opDef "ExtractImagePatches"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images
-{-
-attr {
-  description: "The size of the sliding window for each dimension of `images`."
-  has_minimum: true
-  minimum: 4
-  name: "ksizes"
-  type: "list(int)"
-}
-attr {
-  description: "1-D of length 4. How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  description: "1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\nsubsampling them spatially by a factor of `rates`."
-  has_minimum: true
-  minimum: 4
-  name: "rates"
-  type: "list(int)"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use.\n\nWe specify the size-related attributes as:\n\n```python\n      ksizes = [1, ksize_rows, ksize_cols, 1]\n      strides = [1, strides_rows, strides_cols, 1]\n      rates = [1, rates_rows, rates_cols, 1]\n```"
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`."
-  name: "images"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension."
-  name: "patches"
-  type_attr: "T"
-}
--}
-
--- | SpaceToDepth for tensors of type T.
---
--- Rearranges blocks of spatial data into depth. More specifically,
--- this op outputs a copy of the input tensor where values from the `height`
--- and `width` dimensions are moved to the `depth` dimension.
--- The attr `block_size` indicates the input block size and how the data is moved.
--- 
---   * Non-overlapping blocks of size `block_size x block_size` are rearranged
---     into depth at each location.
---   * The depth of the output tensor is `input_depth * block_size * block_size`.
---   * The input tensor's height and width must be divisible by block_size.
--- 
--- That is, assuming the input is in the shape:
--- `[batch, height, width, depth]`,
--- the shape of the output will be:
--- `[batch, height/block_size, width/block_size, depth*block_size*block_size]`
--- 
--- This operation requires that the input tensor be of rank 4, and that
--- `block_size` be >= 2 and a divisor of both the input `height` and `width`.
--- 
--- This operation is useful for resizing the activations between convolutions
--- (but keeping all data), e.g. instead of pooling. It is also useful for training
--- purely convolutional models.
--- 
--- For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:
--- 
--- ```prettyprint
--- x = [[[[1], [2]],
---       [[3], [4]]]]
--- ```
--- 
--- This operation will output a tensor of shape `[1, 1, 1, 4]`:
--- 
--- ```prettyprint
--- [[[[1, 2, 3, 4]]]]
--- ```
--- 
--- Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
--- the corresponding output will have a single element (i.e. width and height are
--- both 1) and will have a depth of 4 channels (1 * block_size * block_size).
--- The output element shape is `[1, 1, 4]`.
--- 
--- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
--- 
--- ```prettyprint
--- x = [[[[1, 2, 3], [4, 5, 6]],
---       [[7, 8, 9], [10, 11, 12]]]]
--- ```
--- 
--- This operation, for block_size of 2, will return the following tensor of shape
--- `[1, 1, 1, 12]`
--- 
--- ```prettyprint
--- [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
--- ```
--- 
--- Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
--- 
--- ```prettyprint
--- x = [[[[1],   [2],  [5],  [6]],
---       [[3],   [4],  [7],  [8]],
---       [[9],  [10], [13],  [14]],
---       [[11], [12], [15],  [16]]]]
--- ```
--- 
--- the operator will return the following tensor of shape `[1, 2, 2, 4]`:
--- 
--- ```prettyprint
--- x = [[[[1, 2, 3, 4],
---        [5, 6, 7, 8]],
---       [[9, 10, 11, 12],
---        [13, 14, 15, 16]]]]
--- ```
-spaceToDepth :: forall v1 t . (TensorType t) =>
-                Data.Int.Int64 -- ^ __block_size__: The size of the spatial block.
-                -> Tensor v1 t -- ^ __input__
-                -> Tensor Value t -- ^ __output__
-spaceToDepth block_size input | eqLengthGuard [] =
-    buildOp (opDef "SpaceToDepth"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "block_size" .~ block_size)
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "The size of the spatial block."
-  has_minimum: true
-  minimum: 2
-  name: "block_size"
-  type: "int"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
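-
--- A minimal usage sketch of the first example above (hand-written, not
--- generated code), assuming `runSession`, `run`, and `constant` from the
--- released TensorFlow.Core and TensorFlow.Ops modules (imported as `TF`) and
--- this module imported as `CoreOps`:
---
--- ```haskell
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- spaceToDepthExample :: IO (V.Vector Float)
--- spaceToDepthExample = TF.runSession $ TF.run $
---     CoreOps.spaceToDepth 2 $
---         TF.constant (TF.Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float]
--- -- expected: V.fromList [1.0, 2.0, 3.0, 4.0], i.e. the `[1, 1, 1, 4]` output.
--- ```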
-
--- | Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
-
-cropAndResizeGradBoxes :: forall v1 v2 v3 v4 t . (TensorType t,
-                                                  OneOf '[Data.Int.Int16,
-                                                          Data.Int.Int32,
-                                                          Data.Int.Int64,
-                                                          Data.Int.Int8,
-                                                          Data.Word.Word16,
-                                                          Data.Word.Word8,
-                                                          Double, Float] t) =>
-                          Tensor v1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
-                          -> Tensor v2 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
-                                         -- Both `image_height` and `image_width` need to be positive.
-                          -> Tensor v3 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
-                                             -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
-                                             -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
-                                             -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the
-                                             -- `[0, 1]` interval of normalized image height is mapped to
-                                             -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
-                                             -- which case the sampled crop is an up-down flipped version of the original
-                                             -- image. The width dimension is treated similarly. Normalized coordinates
-                                             -- outside the `[0, 1]` range are allowed, in which case we use
-                                             -- `extrapolation_value` to extrapolate the input image values.
-                          -> Tensor v4 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-                                                      -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
-                          -> Tensor Value Float -- ^ __output__: A 2-D tensor of shape `[num_boxes, 4]`.
-cropAndResizeGradBoxes grads image boxes box_ind | eqLengthGuard [] =
-    buildOp (opDef "CropAndResizeGradBoxes"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        grads image boxes box_ind
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "bilinear" } }
-  default_value { s: "bilinear" }
-  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
-  name: "method"
-  type: "string"
-}
-input_arg {
-  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
-  name: "grads"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
-  name: "image"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
-  name: "boxes"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
-  name: "box_ind"
-  type: DT_INT32
-}
-output_arg {
-  description: "A 2-D tensor of shape `[num_boxes, 4]`."
-  name: "output"
-  type: DT_FLOAT
-}
--}
-
--- | BatchToSpace for N-D tensors of type T.
---
--- This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
--- `block_shape + [batch / prod(block_shape)]`, interleaves these blocks back into
--- the grid defined by the spatial dimensions `[1, ..., M]`, to obtain a result with
--- the same rank as the input.  The spatial dimensions of this intermediate result are then
--- optionally cropped according to `crops` to produce the output.  This is the
--- reverse of SpaceToBatch.  See below for a precise description.
-batchToSpaceND :: forall v1 v2 v3 t tblock_shape tcrops . (TensorType t,
-                                                           TensorType tblock_shape,
-                                                           OneOf '[Data.Int.Int32,
-                                                                   Data.Int.Int64] tblock_shape,
-                                                           TensorType tcrops,
-                                                           OneOf '[Data.Int.Int32,
-                                                                   Data.Int.Int64] tcrops) =>
-                  Tensor v1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
-                              -- where spatial_shape has M dimensions.
-                  -> Tensor v2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
-                  -> Tensor v3 tcrops -- ^ __crops__: 2-D with shape `[M, 2]`, all values must be >= 0.
-                                      --   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
-                                      --   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
-                                      --   required that
-                                      --   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
-                                      -- 
-                                      -- This operation is equivalent to the following steps:
-                                      -- 
-                                      -- 1. Reshape `input` to `reshaped` of shape:
-                                      --      [block_shape[0], ..., block_shape[M-1],
-                                      --       batch / prod(block_shape),
-                                      --       input_shape[1], ..., input_shape[N-1]]
-                                      -- 
-                                      -- 2. Permute dimensions of `reshaped` to produce `permuted` of shape
-                                      --      [batch / prod(block_shape),
-                                      -- 
-                                      --       input_shape[1], block_shape[0],
-                                      --       ...,
-                                      --       input_shape[M], block_shape[M-1],
-                                      -- 
-                                      --       input_shape[M+1], ..., input_shape[N-1]]
-                                      -- 
-                                      -- 3. Reshape `permuted` to produce `reshaped_permuted` of shape
-                                      --      [batch / prod(block_shape),
-                                      -- 
-                                      --       input_shape[1] * block_shape[0],
-                                      --       ...,
-                                      --       input_shape[M] * block_shape[M-1],
-                                      -- 
-                                      --       input_shape[M+1],
-                                      --       ...,
-                                      --       input_shape[N-1]]
-                                      -- 
-                                      -- 4. Crop the start and end of dimensions `[1, ..., M]` of
-                                      --    `reshaped_permuted` according to `crops` to produce the output of shape:
-                                      --      [batch / prod(block_shape),
-                                      -- 
-                                      --       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
-                                      --       ...,
-                                      --       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
-                                      -- 
-                                      --       input_shape[M+1], ..., input_shape[N-1]]
-                                      -- 
-                                      -- Some examples:
-                                      -- 
-                                      -- (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
-                                      --     `crops = [[0, 0], [0, 0]]`:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-                                      -- ```
-                                      -- 
-                                      -- The output tensor has shape `[1, 2, 2, 1]` and value:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- x = [[[[1], [2]], [[3], [4]]]]
-                                      -- ```
-                                      -- 
-                                      -- (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
-                                      --     `crops = [[0, 0], [0, 0]]`:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
-                                      -- ```
-                                      -- 
-                                      -- The output tensor has shape `[1, 2, 2, 3]` and value:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- x = [[[[1, 2, 3], [4, 5, 6]],
-                                      --       [[7, 8, 9], [10, 11, 12]]]]
-                                      -- ```
-                                      -- 
-                                      -- (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
-                                      --     `crops = [[0, 0], [0, 0]]`:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- x = [[[[1], [3]], [[9], [11]]],
-                                      --      [[[2], [4]], [[10], [12]]],
-                                      --      [[[5], [7]], [[13], [15]]],
-                                      --      [[[6], [8]], [[14], [16]]]]
-                                      -- ```
-                                      -- 
-                                      -- The output tensor has shape `[1, 4, 4, 1]` and value:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- x = [[[1],   [2],  [3],  [4]],
-                                      --      [[5],   [6],  [7],  [8]],
-                                      --      [[9],  [10], [11],  [12]],
-                                      --      [[13], [14], [15],  [16]]]
-                                      -- ```
-                                      -- 
-                                      -- (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
-                                      --     `crops = [[0, 0], [2, 0]]`:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
-                                      --      [[[0], [2], [4]]], [[[0], [10], [12]]],
-                                      --      [[[0], [5], [7]]], [[[0], [13], [15]]],
-                                      --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
-                                      -- ```
-                                      -- 
-                                      -- The output tensor has shape `[2, 2, 4, 1]` and value:
-                                      -- 
-                                      -- ```prettyprint
-                                      -- x = [[[[1],   [2],  [3],  [4]],
-                                      --       [[5],   [6],  [7],  [8]]],
-                                      --      [[[9],  [10], [11],  [12]],
-                                      --       [[13], [14], [15],  [16]]]]
-                                      -- ```
-                  -> Tensor Value t -- ^ __output__
-batchToSpaceND input block_shape crops | eqLengthGuard [] =
-    buildOp (opDef "BatchToSpaceND"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape)
-             & opAttr "Tcrops" .~ tensorType (undefined :: tcrops))
-        input block_shape crops
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tblock_shape"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tcrops"
-  type: "type"
-}
-input_arg {
-  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D with shape `[M]`, all values must be >= 1."
-  name: "block_shape"
-  type_attr: "Tblock_shape"
-}
-input_arg {
-  description: "2-D with shape `[M, 2]`, all values must be >= 0.\n  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is\n  required that\n  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n     [block_shape[0], ..., block_shape[M-1],\n      batch / prod(block_shape),\n      input_shape[1], ..., input_shape[N-1]]\n\n2. Permute dimensions of `reshaped` to produce `permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1], block_shape[0],\n      ...,\n      input_shape[M], block_shape[M-1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0],\n      ...,\n      input_shape[M] * block_shape[M-1],\n\n      input_shape[M+1],\n      ...,\n      input_shape[N-1]]\n\n4. Crop the start and end of dimensions `[1, ..., M]` of\n   `reshaped_permuted` according to `crops` to produce the output of shape:\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n      ...,\n      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```"
-  name: "crops"
-  type_attr: "Tcrops"
-}
-output_arg { name: "output" type_attr: "T" }
--}
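-
--- A minimal usage sketch of example (1) above (hand-written, not generated
--- code), assuming `runSession`, `run`, `constant`, and `vector` from the
--- released TensorFlow.Core and TensorFlow.Ops modules (imported as `TF`) and
--- this module imported as `CoreOps`:
---
--- ```haskell
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- batchToSpaceNDExample :: IO (V.Vector Float)
--- batchToSpaceNDExample = TF.runSession $ TF.run $
---     CoreOps.batchToSpaceND
---         (TF.constant (TF.Shape [4, 1, 1, 1]) [1, 2, 3, 4 :: Float])
---         (TF.vector [2, 2 :: Int32])                             -- block_shape
---         (TF.constant (TF.Shape [2, 2]) [0, 0, 0, 0 :: Int32])   -- crops
--- -- expected: V.fromList [1.0, 2.0, 3.0, 4.0], in output shape [1, 2, 2, 1].
--- ```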
-
--- | SpaceToBatch for 4-D tensors of type T.
---
--- This is a legacy version of the more general SpaceToBatchND.
--- 
--- Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
--- More specifically, this op outputs a copy of the input tensor where values from
--- the `height` and `width` dimensions are moved to the `batch` dimension. After
--- the zero-padding, both `height` and `width` of the input must be divisible by the
--- block size.
-spaceToBatch :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
-                                            OneOf '[Data.Int.Int32,
-                                                    Data.Int.Int64] tpaddings) =>
-                Data.Int.Int64 -- ^ __block_size__
-                -> Tensor v1 t -- ^ __input__: 4-D with shape `[batch, height, width, depth]`.
-                -> Tensor v2 tpaddings -- ^ __paddings__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
-                                       --   the padding of the input with zeros across the spatial dimensions as follows:
-                                       -- 
-                                       --       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
-                                       -- 
-                                       --   The effective spatial dimensions of the zero-padded input tensor will be:
-                                       -- 
-                                       --       height_pad = pad_top + height + pad_bottom
-                                       --       width_pad = pad_left + width + pad_right
-                                       -- 
-                                       -- The attr `block_size` must be greater than one. It indicates the block size.
-                                       -- 
-                                       --   * Non-overlapping blocks of size `block_size x block_size` in the height and
-                                       --     width dimensions are rearranged into the batch dimension at each location.
-                                       --   * The batch of the output tensor is `batch * block_size * block_size`.
-                                       --   * Both height_pad and width_pad must be divisible by block_size.
-                                       -- 
-                                       -- The shape of the output will be:
-                                       -- 
-                                       --     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
-                                       --      depth]
-                                       -- 
-                                       -- Some examples:
-                                       -- 
-                                       -- (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- x = [[[[1], [2]], [[3], [4]]]]
-                                       -- ```
-                                       -- 
-                                       -- The output tensor has shape `[4, 1, 1, 1]` and value:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-                                       -- ```
-                                       -- 
-                                       -- (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- x = [[[[1, 2, 3], [4, 5, 6]],
-                                       --       [[7, 8, 9], [10, 11, 12]]]]
-                                       -- ```
-                                       -- 
-                                       -- The output tensor has shape `[4, 1, 1, 3]` and value:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
-                                       -- ```
-                                       -- 
-                                       -- (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- x = [[[[1],   [2],  [3],  [4]],
-                                       --       [[5],   [6],  [7],  [8]],
-                                       --       [[9],  [10], [11],  [12]],
-                                       --       [[13], [14], [15],  [16]]]]
-                                       -- ```
-                                       -- 
-                                       -- The output tensor has shape `[4, 2, 2, 1]` and value:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- x = [[[[1], [3]], [[9], [11]]],
-                                       --      [[[2], [4]], [[10], [12]]],
-                                       --      [[[5], [7]], [[13], [15]]],
-                                       --      [[[6], [8]], [[14], [16]]]]
-                                       -- ```
-                                       -- 
-                                       -- (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- x = [[[[1],   [2],  [3],  [4]],
-                                       --       [[5],   [6],  [7],  [8]]],
-                                       --      [[[9],  [10], [11],  [12]],
-                                       --       [[13], [14], [15],  [16]]]]
-                                       -- ```
-                                       -- 
-                                       -- The output tensor has shape `[8, 1, 2, 1]` and value:
-                                       -- 
-                                       -- ```prettyprint
-                                       -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
-                                       --      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
-                                       -- ```
-                                       -- 
-                                       -- Among others, this operation is useful for reducing atrous convolution into
-                                       -- regular convolution.
-                -> Tensor Value t -- ^ __output__
-spaceToBatch block_size input paddings | eqLengthGuard [] =
-    buildOp (opDef "SpaceToBatch"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings)
-             & opAttr "block_size" .~ block_size)
-        input paddings
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tpaddings"
-  type: "type"
-}
-attr {
-  has_minimum: true minimum: 2 name: "block_size" type: "int"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, depth]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\n  the padding of the input with zeros across the spatial dimensions as follows:\n\n      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n  The effective spatial dimensions of the zero-padded input tensor will be:\n\n      height_pad = pad_top + height + pad_bottom\n      width_pad = pad_left + width + pad_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\n  * Non-overlapping blocks of size `block_size x block size` in the height and\n    width dimensions are rearranged into the batch dimension at each location.\n  * The batch of the output tensor is `batch * block_size * block_size`.\n  * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n     depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
-  name: "paddings"
-  type_attr: "Tpaddings"
-}
-output_arg { name: "output" type_attr: "T" }
--}
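-
--- A minimal usage sketch of example (1) above (hand-written, not generated
--- code), assuming `runSession`, `run`, and `constant` from the released
--- TensorFlow.Core and TensorFlow.Ops modules (imported as `TF`) and this
--- module imported as `CoreOps`:
---
--- ```haskell
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- spaceToBatchExample :: IO (V.Vector Float)
--- spaceToBatchExample = TF.runSession $ TF.run $
---     CoreOps.spaceToBatch 2
---         (TF.constant (TF.Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float])
---         (TF.constant (TF.Shape [2, 2]) [0, 0, 0, 0 :: Int32])   -- paddings
--- -- expected: V.fromList [1.0, 2.0, 3.0, 4.0], in output shape [4, 1, 1, 1].
--- ```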
-
--- | Adjust the hue of one or more images.
---
--- `images` is a tensor of at least 3 dimensions.  The last dimension is
--- interpreted as channels, and must be three.
--- 
--- The input image is considered in the RGB colorspace. Conceptually, the RGB
--- colors are first mapped into HSV. A delta is then applied to all the hue
--- values, which are then remapped back to the RGB colorspace.
-adjustHue :: Tensor v1 Float -- ^ __images__: Images to adjust.  At least 3-D.
-             -> Tensor v2 Float -- ^ __delta__: A float delta to add to the hue.
-             -> Tensor Value Float -- ^ __output__: The hue-adjusted image or images.
-adjustHue images delta | eqLengthGuard [] =
-    buildOp (opDef "AdjustHue")
-        images delta
-{-
-input_arg {
-  description: "Images to adjust.  At least 3-D."
-  name: "images"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A float delta to add to the hue."
-  name: "delta"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The hue-adjusted image or images."
-  name: "output"
-  type: DT_FLOAT
-}
--}
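-
--- A minimal usage sketch (hand-written, not generated code), assuming
--- `runSession`, `run`, `constant`, and `scalar` from the released
--- TensorFlow.Core and TensorFlow.Ops modules (imported as `TF`) and this
--- module imported as `CoreOps`:
---
--- ```haskell
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- adjustHueExample :: IO (V.Vector Float)
--- adjustHueExample = TF.runSession $ TF.run $
---     CoreOps.adjustHue
---         (TF.constant (TF.Shape [1, 1, 3]) [1, 0, 0 :: Float])  -- one red pixel
---         (TF.scalar 0.5)                                        -- rotate hue halfway
--- -- expected (approximately): V.fromList [0.0, 1.0, 1.0], i.e. cyan.
--- ```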
-
--- | SpaceToBatch for N-D tensors of type T.
---
--- This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
--- grid of blocks of shape `block_shape`, and interleaves these blocks with the
--- "batch" dimension (0) such that in the output, the spatial dimensions
--- `[1, ..., M]` correspond to the position within the grid, and the batch
--- dimension combines both the position within a spatial block and the original
--- batch position.  Prior to division into blocks, the spatial dimensions of the
--- input are optionally zero padded according to `paddings`.  See below for a
--- precise description.
-spaceToBatchND :: forall v1 v2 v3 t tblock_shape tpaddings . (TensorType t,
-                                                              TensorType tblock_shape,
-                                                              OneOf '[Data.Int.Int32,
-                                                                      Data.Int.Int64] tblock_shape,
-                                                              TensorType tpaddings,
-                                                              OneOf '[Data.Int.Int32,
-                                                                      Data.Int.Int64] tpaddings) =>
-                  Tensor v1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
-                              -- where spatial_shape has `M` dimensions.
-                  -> Tensor v2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1.
-                  -> Tensor v3 tpaddings -- ^ __paddings__: 2-D with shape `[M, 2]`, all values must be >= 0.
-                                         --   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
-                                         --   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
-                                         --   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
-                                         -- 
-                                         -- This operation is equivalent to the following steps:
-                                         -- 
-                                         -- 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
-                                         --    input according to `paddings` to produce `padded` of shape `padded_shape`.
-                                         -- 
-                                         -- 2. Reshape `padded` to `reshaped_padded` of shape:
-                                         -- 
-                                         --      [batch] +
-                                         --      [padded_shape[1] / block_shape[0],
-                                         --        block_shape[0],
-                                         --       ...,
-                                         --       padded_shape[M] / block_shape[M-1],
-                                         --       block_shape[M-1]] +
-                                         --      remaining_shape
-                                         -- 
-                                         -- 3. Permute dimensions of `reshaped_padded` to produce
-                                         --    `permuted_reshaped_padded` of shape:
-                                         -- 
-                                         --      block_shape +
-                                         --      [batch] +
-                                         --      [padded_shape[1] / block_shape[0],
-                                         --       ...,
-                                         --       padded_shape[M] / block_shape[M-1]] +
-                                         --      remaining_shape
-                                         -- 
-                                         -- 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
-                                         --    dimension, producing an output tensor of shape:
-                                         -- 
-                                         --      [batch * prod(block_shape)] +
-                                         --      [padded_shape[1] / block_shape[0],
-                                         --       ...,
-                                         --       padded_shape[M] / block_shape[M-1]] +
-                                         --      remaining_shape
-                                         -- 
-                                         -- Some examples:
-                                         -- 
-                                         -- (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
-                                         --     `paddings = [[0, 0], [0, 0]]`:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- x = [[[[1], [2]], [[3], [4]]]]
-                                         -- ```
-                                         -- 
-                                         -- The output tensor has shape `[4, 1, 1, 1]` and value:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-                                         -- ```
-                                         -- 
-                                         -- (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
-                                         --     `paddings = [[0, 0], [0, 0]]`:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- x = [[[[1, 2, 3], [4, 5, 6]],
-                                         --       [[7, 8, 9], [10, 11, 12]]]]
-                                         -- ```
-                                         -- 
-                                         -- The output tensor has shape `[4, 1, 1, 3]` and value:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
-                                         -- ```
-                                         -- 
-                                         -- (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
-                                         --     `paddings = [[0, 0], [0, 0]]`:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- x = [[[[1],   [2],  [3],  [4]],
-                                         --       [[5],   [6],  [7],  [8]],
-                                         --       [[9],  [10], [11],  [12]],
-                                         --       [[13], [14], [15],  [16]]]]
-                                         -- ```
-                                         -- 
-                                         -- The output tensor has shape `[4, 2, 2, 1]` and value:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- x = [[[[1], [3]], [[9], [11]]],
-                                         --      [[[2], [4]], [[10], [12]]],
-                                         --      [[[5], [7]], [[13], [15]]],
-                                         --      [[[6], [8]], [[14], [16]]]]
-                                         -- ```
-                                         -- 
-                                         -- (4) For the following input of shape `[2, 2, 4, 1]`, `block_shape = [2, 2]`, and
-                                         --     `paddings = [[0, 0], [2, 0]]`:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- x = [[[[1],   [2],  [3],  [4]],
-                                         --       [[5],   [6],  [7],  [8]]],
-                                         --      [[[9],  [10], [11],  [12]],
-                                         --       [[13], [14], [15],  [16]]]]
-                                         -- ```
-                                         -- 
-                                         -- The output tensor has shape `[8, 1, 3, 1]` and value:
-                                         -- 
-                                         -- ```prettyprint
-                                         -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
-                                         --      [[[0], [2], [4]]], [[[0], [10], [12]]],
-                                         --      [[[0], [5], [7]]], [[[0], [13], [15]]],
-                                         --      [[[0], [6], [8]]], [[[0], [14], [16]]]]
-                                         -- ```
-                                         -- 
-                                         -- Among others, this operation is useful for reducing atrous convolution into
-                                         -- regular convolution.
-                  -> Tensor Value t -- ^ __output__
-spaceToBatchND input block_shape paddings | eqLengthGuard [] =
-    buildOp (opDef "SpaceToBatchND"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape)
-             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
-        input block_shape paddings
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tblock_shape"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tpaddings"
-  type: "type"
-}
-input_arg {
-  description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D with shape `[M]`, all values must be >= 1."
-  name: "block_shape"
-  type_attr: "Tblock_shape"
-}
-input_arg {
-  description: "2-D with shape `[M, 2]`, all values must be >= 0.\n  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n  `i + 1`, which corresponds to spatial dimension `i`.  It is required that\n  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n   input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. Reshape `padded` to `reshaped_padded` of shape:\n\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n       block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1],\n      block_shape[M-1]] +\n     remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n   `permuted_reshaped_padded` of shape:\n\n     block_shape +\n     [batch] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\n4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n   dimension, producing an output tensor of shape:\n\n     [batch * prod(block_shape)] +\n     [padded_shape[1] / block_shape[0],\n      ...,\n      padded_shape[M] / block_shape[M-1]] +\n     remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n    `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]],\n      [[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n    paddings = `[[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 3, 1]` and value:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution."
-  name: "paddings"
-  type_attr: "Tpaddings"
-}
-output_arg { name: "output" type_attr: "T" }
--}
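-
--- A minimal usage sketch of example (1) above (hand-written, not generated
--- code), assuming `runSession`, `run`, `constant`, and `vector` from the
--- released TensorFlow.Core and TensorFlow.Ops modules (imported as `TF`) and
--- this module imported as `CoreOps`:
---
--- ```haskell
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- spaceToBatchNDExample :: IO (V.Vector Float)
--- spaceToBatchNDExample = TF.runSession $ TF.run $
---     CoreOps.spaceToBatchND
---         (TF.constant (TF.Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float])
---         (TF.vector [2, 2 :: Int32])                             -- block_shape
---         (TF.constant (TF.Shape [2, 2]) [0, 0, 0, 0 :: Int32])   -- paddings
--- -- expected: V.fromList [1.0, 2.0, 3.0, 4.0], in output shape [4, 1, 1, 1].
--- ```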
-
--- | Returns the diagonal part of the tensor.
---
--- This operation returns a tensor with the `diagonal` part
--- of the `input`. The `diagonal` part is computed as follows:
--- 
--- Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
--- tensor of rank `k` with dimensions `[D1,..., Dk]` where:
--- 
--- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 'input' is [[1, 0, 0, 0]
---               [0, 2, 0, 0]
---               [0, 0, 3, 0]
---               [0, 0, 0, 4]]
--- 
--- tf.diag_part(input) ==> [1, 2, 3, 4]
--- ```
-diagPart :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int32, Data.Int.Int64,
-                                                 Double, Float] t) =>
-            Tensor v1 t -- ^ __input__: Rank k tensor where k is 2, 4, or 6.
-            -> Tensor Value t -- ^ __diagonal__: The extracted diagonal.
-diagPart input | eqLengthGuard [] =
-    buildOp (opDef "DiagPart"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Rank k tensor where k is 2, 4, or 6."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "The extracted diagonal."
-  name: "diagonal"
-  type_attr: "T"
-}
--}
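-
--- A minimal usage sketch of the example above (hand-written, not generated
--- code), assuming `runSession`, `run`, and `constant` from the released
--- TensorFlow.Core and TensorFlow.Ops modules (imported as `TF`) and this
--- module imported as `CoreOps`:
---
--- ```haskell
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- diagPartExample :: IO (V.Vector Float)
--- diagPartExample = TF.runSession $ TF.run $
---     CoreOps.diagPart $ TF.constant (TF.Shape [4, 4])
---         [ 1, 0, 0, 0
---         , 0, 2, 0, 0
---         , 0, 0, 3, 0
---         , 0, 0, 0, 4 :: Float ]
--- -- expected: V.fromList [1.0, 2.0, 3.0, 4.0]
--- ```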
-
--- | A placeholder op for a value that will be fed into the computation.
---
--- N.B. This operation will fail with an error if it is executed. It is
--- intended as a way to represent a value that will always be fed, and to
--- provide attrs that enable the fed value to be checked at runtime.
-placeholderV2 :: forall dtype . (TensorType dtype) =>
-                 Shape -- ^ __shape__: The shape of the tensor. The shape can be any partially-specified
-                       -- shape.  To be unconstrained, pass in a shape with unknown rank.
-                 -> Tensor Value dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.
-placeholderV2 shape | eqLengthGuard [] =
-    buildOp (opDef "PlaceholderV2"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "shape" .~ shape)
-        
-{-
-attr {
-  description: "The type of elements in the tensor."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  description: "The shape of the tensor. The shape can be any partially-specified\nshape.  To be unconstrained, pass in a shape with unknown rank."
-  name: "shape"
-  type: "shape"
-}
-output_arg {
-  description: "A placeholder tensor that must be replaced using the feed mechanism."
-  name: "output"
-  type_attr: "dtype"
-}
--}
-
--- | Computes acos of x element-wise.
-
-acos :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Int.Int32, Data.Int.Int64,
-                                             Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-acos x | eqLengthGuard [] =
-    buildOp (opDef "Acos"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
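-
--- A minimal usage sketch (hand-written, not generated code), assuming
--- `runSession`, `run`, and `vector` from the released TensorFlow.Core and
--- TensorFlow.Ops modules (imported as `TF`) and this module imported as
--- `CoreOps`:
---
--- ```haskell
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- acosExample :: IO (V.Vector Float)
--- acosExample = TF.runSession $ TF.run $
---     CoreOps.acos (TF.vector [1, 0 :: Float])
--- -- expected (approximately): V.fromList [0.0, pi / 2]
--- ```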
-
--- | A placeholder op for a value that will be fed into the computation.
---
--- N.B. This operation will fail with an error if it is executed. It is
--- intended as a way to represent a value that will always be fed, and to
--- provide attrs that enable the fed value to be checked at runtime.
-placeholder :: forall dtype . (TensorType dtype) =>
-               Tensor Value dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism.
-placeholder  | eqLengthGuard [] =
-    buildOp (opDef "Placeholder"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        
-{-
-attr {
-  description: "The type of elements in the tensor."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  default_value { shape { } }
-  description: "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained."
-  name: "shape"
-  type: "shape"
-}
-output_arg {
-  description: "A placeholder tensor that must be replaced using the feed mechanism."
-  name: "output"
-  type_attr: "dtype"
-}
--}
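-
--- A minimal feeding sketch (hand-written, not generated code), assuming
--- `runSession`, `runWithFeeds`, `feed`, and `encodeTensorData` from the
--- released TensorFlow.Core module (imported as `TF`) and this module imported
--- as `CoreOps`:
---
--- ```haskell
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
---
--- placeholderExample :: IO (V.Vector Float)
--- placeholderExample = TF.runSession $ do
---     let p = CoreOps.placeholder :: TF.Tensor TF.Value Float
---     TF.runWithFeeds
---         [TF.feed p (TF.encodeTensorData (TF.Shape [3]) (V.fromList [1, 2, 3]))]
---         p
--- ```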
-
--- | Does nothing. Serves as a control trigger for scheduling.
---
--- Only useful as a placeholder for control edges.
-controlTrigger :: ControlNode
-controlTrigger  | eqLengthGuard [] =
-    buildOp (opDef "ControlTrigger")
-        
-{-
-
--}
-
--- | Computes atan of x element-wise.
-
-atan :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Int.Int32, Data.Int.Int64,
-                                             Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-atan x | eqLengthGuard [] =
-    buildOp (opDef "Atan"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Pads a tensor with mirrored values.
---
--- This operation pads a `input` with mirrored values according to the `paddings`
--- you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
--- the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
--- how many values to add before the contents of `input` in that dimension, and
--- `paddings[D, 1]` indicates how many values to add after the contents of `input`
--- in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
--- than `input.dim_size(D)` in `SYMMETRIC` mode (or `input.dim_size(D) - 1` in
--- `REFLECT` mode).
--- 
--- The padded size of each dimension D of the output is:
--- 
--- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
--- 
--- For example:
--- 
--- ```prettyprint
--- # 't' is [[1, 2, 3], [4, 5, 6]].
--- # 'paddings' is [[1, 1], [2, 2]].
--- # 'mode' is SYMMETRIC.
--- # rank of 't' is 2.
--- pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
---                       [2, 1, 1, 2, 3, 3, 2]
---                       [5, 4, 4, 5, 6, 6, 5]
---                       [5, 4, 4, 5, 6, 6, 5]]
--- ```
-mirrorPad :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tpaddings) =>
-             Tensor v1 t -- ^ __input__: The input tensor to be padded.
-             -> Tensor v2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
-                                    -- rows must be the same as the rank of `input`.
-             -> Tensor Value t -- ^ __output__: The padded tensor.
-mirrorPad input paddings | eqLengthGuard [] =
-    buildOp (opDef "MirrorPad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
-        input paddings
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tpaddings"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
-  description: "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode."
-  name: "mode"
-  type: "string"
-}
-input_arg {
-  description: "The input tensor to be padded."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
-  name: "paddings"
-  type_attr: "Tpaddings"
-}
-output_arg {
-  description: "The padded tensor." name: "output" type_attr: "T"
-}
--}
-
--- | Returns locations of true values in a boolean tensor.
---
--- This operation returns the coordinates of true elements in `input`. The
--- coordinates are returned in a 2-D tensor where the first dimension (rows)
--- represents the number of true elements, and the second dimension (columns)
--- represents the coordinates of the true elements. Keep in mind, the shape of
--- the output tensor can vary depending on how many true values there are in
--- `input`. Indices are output in row-major order.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 'input' tensor is [[True, False]
--- #                    [True, False]]
--- # 'input' has two true values, so output has two coordinates.
--- # 'input' has rank of 2, so coordinates have two indices.
--- where(input) ==> [[0, 0],
---                   [1, 0]]
--- 
--- # `input` tensor is [[[True, False]
--- #                     [True, False]]
--- #                    [[False, True]
--- #                     [False, True]]
--- #                    [[False, False]
--- #                     [False, True]]]
--- # 'input' has 5 true values, so output has 5 coordinates.
--- # 'input' has rank of 3, so coordinates have three indices.
--- where(input) ==> [[0, 0, 0],
---                   [0, 1, 0],
---                   [1, 0, 1],
---                   [1, 1, 1],
---                   [2, 1, 1]]
--- ```
-where' :: Tensor v1 Bool -- ^ __input__
-          -> Tensor Value Data.Int.Int64 -- ^ __index__
-where' input | eqLengthGuard [] =
-    buildOp (opDef "Where")
-        input
-{-
-input_arg { name: "input" type: DT_BOOL }
-output_arg { name: "index" type: DT_INT64 }
--}
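-
--- A minimal usage sketch for @where'@ (illustrative only; it assumes the
--- @constant@ helper from 'TensorFlow.Ops' and @runSession@/@run@ from
--- 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int64)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Mirrors the first example above; expected result: [0, 0, 1, 0]
--- -- (the flattened coordinates [[0, 0], [1, 0]]).
--- whereExample :: IO (V.Vector Int64)
--- whereExample = TF.runSession $ do
---     let input = TF.constant (TF.Shape [2, 2]) [True, False, True, False]
---     TF.run (CoreOps.where' input)
--- ```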
-
--- | Computes gradients of average pooling function.
-
-avgPool3DGrad :: forall v1 v2 t . (TensorType t,
-                                   OneOf '[(Data.Complex.Complex Double),
-                                           (Data.Complex.Complex Float),
-                                           Data.Int.Int16, Data.Int.Int32,
-                                           Data.Int.Int64, Data.Int.Int8,
-                                           Data.Word.Word16, Data.Word.Word8,
-                                           Double, Float] t) =>
-                 Tensor v1 Data.Int.Int32 -- ^ __orig_input_shape__: The original input dimensions.
-                 -> Tensor v2 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`.
-                 -> Tensor Value t -- ^ __output__: The backprop for input.
-avgPool3DGrad orig_input_shape grad | eqLengthGuard [] =
-    buildOp (opDef "AvgPool3DGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        orig_input_shape grad
-{-
-attr {
-  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The original input dimensions."
-  name: "orig_input_shape"
-  type: DT_INT32
-}
-input_arg {
-  description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
-  name: "grad"
-  type_attr: "T"
-}
-output_arg {
-  description: "The backprop for input."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Restore a Reader to its initial clean state.
-
-readerReset :: Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
-               -> Build (ControlNode)
-readerReset reader_handle | eqLengthGuard [] =
-    buildOp (opDef "ReaderReset")
-        reader_handle
-{-
-input_arg {
-  description: "Handle to a Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
--}
-
--- | Returns the gradient of `Tile`.
---
--- Since `Tile` takes an input and repeats the input `multiples` times
--- along each dimension, `TileGrad` takes in `multiples` and aggregates
--- each repeated tile of `input` into `output`.
-tileGrad :: forall v1 v2 t . (TensorType t) => Tensor v1 t -- ^ __input__
-            -> Tensor v2 Data.Int.Int32 -- ^ __multiples__
-            -> Tensor Value t -- ^ __output__
-tileGrad input multiples | eqLengthGuard [] =
-    buildOp (opDef "TileGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input multiples
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "input" type_attr: "T" }
-input_arg { name: "multiples" type: DT_INT32 }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Inserts a dimension of 1 into a tensor's shape.
---
--- Given a tensor `input`, this operation inserts a dimension of 1 at the
--- dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
--- zero; if you specify a negative number for `dim` it is counted backward from
--- the end.
--- 
--- This operation is useful if you want to add a batch dimension to a single
--- element. For example, if you have a single image of shape `[height, width,
--- channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
--- which will make the shape `[1, height, width, channels]`.
--- 
--- Other examples:
--- 
--- ```prettyprint
--- # 't' is a tensor of shape [2]
--- shape(expand_dims(t, 0)) ==> [1, 2]
--- shape(expand_dims(t, 1)) ==> [2, 1]
--- shape(expand_dims(t, -1)) ==> [2, 1]
--- 
--- # 't2' is a tensor of shape [2, 3, 5]
--- shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
--- shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
--- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
--- ```
--- 
--- This operation requires that:
--- 
--- `-1-input.dims() <= dim <= input.dims()`
--- 
--- This operation is related to `squeeze()`, which removes dimensions of
--- size 1.
-expandDims :: forall v1 v2 t tdim . (TensorType t, TensorType tdim,
-                                     OneOf '[Data.Int.Int32,
-                                             Data.Int.Int64] tdim) =>
-              Tensor v1 t -- ^ __input__
-              -> Tensor v2 tdim -- ^ __dim__: 0-D (scalar). Specifies the dimension index at which to
-                                -- expand the shape of `input`.
-              -> Tensor Value t -- ^ __output__: Contains the same data as `input`, but its shape has an additional
-              -- dimension of size 1 added.
-expandDims input dim | eqLengthGuard [] =
-    buildOp (opDef "ExpandDims"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tdim" .~ tensorType (undefined :: tdim))
-        input dim
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tdim"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg {
-  description: "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`."
-  name: "dim"
-  type_attr: "Tdim"
-}
-output_arg {
-  description: "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added."
-  name: "output"
-  type_attr: "T"
-}
--}
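-
--- A minimal usage sketch (illustrative; assumes @vector@/@scalar@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- shape(expand_dims(t, 0)) from the examples above; expected: [1, 2].
--- expandDimsExample :: IO (V.Vector Int32)
--- expandDimsExample = TF.runSession $ do
---     let t = TF.vector [1, 2 :: Float]
---     TF.run (CoreOps.shape (CoreOps.expandDims t (TF.scalar (0 :: Int32))))
--- ```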
-
--- | Outputs a `Summary` protocol buffer with a tensor.
-
-tensorSummary :: forall v1 t . (TensorType t) =>
-                 Tensor v1 t -- ^ __tensor__: A tensor to serialize.
-                 -> Tensor Value Data.ByteString.ByteString -- ^ __summary__
-tensorSummary tensor | eqLengthGuard [] =
-    buildOp (opDef "TensorSummary"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        tensor
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { s: "" }
-  description: "A json-encoded SummaryDescription proto."
-  name: "description"
-  type: "string"
-}
-attr {
-  default_value { list { } }
-  description: "An unused list of strings."
-  name: "labels"
-  type: "list(string)"
-}
-attr {
-  default_value { s: "" }
-  description: "An unused string."
-  name: "display_name"
-  type: "string"
-}
-input_arg {
-  description: "A tensor to serialize." name: "tensor" type_attr: "T"
-}
-output_arg { name: "summary" type: DT_STRING }
--}
-
--- | Constructs a tensor by tiling a given tensor.
---
--- This operation creates a new tensor by replicating `input` `multiples` times.
--- The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
--- and the values of `input` are replicated `multiples[i]` times along the 'i'th
--- dimension. For example, tiling `[a b c d]` by `[2]` produces
--- `[a b c d a b c d]`.
-tile :: forall v1 v2 t tmultiples . (TensorType t, TensorType tmultiples,
-                                     OneOf '[Data.Int.Int32,
-                                             Data.Int.Int64] tmultiples) =>
-        Tensor v1 t -- ^ __input__: 1-D or higher.
-        -> Tensor v2 tmultiples -- ^ __multiples__: 1-D. Length must be the same as the number of dimensions in `input`
-        -> Tensor Value t -- ^ __output__
-tile input multiples | eqLengthGuard [] =
-    buildOp (opDef "Tile"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tmultiples" .~ tensorType (undefined :: tmultiples))
-        input multiples
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tmultiples"
-  type: "type"
-}
-input_arg {
-  description: "1-D or higher." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "1-D. Length must be the same as the number of dimensions in `input`"
-  name: "multiples"
-  type_attr: "Tmultiples"
-}
-output_arg { name: "output" type_attr: "T" }
--}
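-
--- A minimal usage sketch (illustrative; assumes @vector@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Tiling [1, 2, 3] by [2]; expected result: [1, 2, 3, 1, 2, 3].
--- tileExample :: IO (V.Vector Float)
--- tileExample = TF.runSession $ do
---     let t = TF.vector [1, 2, 3 :: Float]
---     TF.run (CoreOps.tile t (TF.vector [2 :: Int32]))
--- ```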
-
--- | Return a strided slice from `input`.
---
--- Note, most Python users will want to use the Python `Tensor.__getitem__`
--- or `Variable.__getitem__` rather than this op directly.
--- 
--- The goal of this op is to produce a new tensor with a subset of
--- the elements from the `n` dimensional `input` tensor. The subset is chosen using
--- a sequence of `m` sparse range specifications encoded into the arguments
--- of this function. Note, in some cases
--- `m` could be equal to `n`, but this need not be the case. Each
--- range specification entry can be one of the following:
--- 
--- - An ellipsis (...). Ellipses are used to imply zero or more
---   dimensions of full-dimension selection and are produced using
---   `ellipsis_mask`. For example, `foo[...]` is the identity slice.
--- 
--- - A new axis. This is used to insert a new shape=1 dimension and is
---   produced using `new_axis_mask`. For example, `foo[tf.newaxis, ...]` where
---   `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
--- 
--- - A range `begin:end:stride`. This is used to specify how much to choose from
---   a given dimension. `stride` can be any integer but 0.  `begin` is an integer
---   which represents the index of the first value to select while `end` represents
---   the index of the last value to select. The number of values selected in each
---   dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
---   `begin` and `end` can be negative where `-1` is the last element, `-2` is
---   the second to last. `begin_mask` controls whether to replace the explicitly
---   given `begin` with an implicit effective value of `0` if `stride > 0` and
---   `-1` if `stride < 0`. `end_mask` is analogous but produces the number
---   required to create the largest open interval. For example, given a shape
---   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
---   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
---   and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
---   first dimension of a tensor while dropping the last two (in the original
---   order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
--- 
--- - A single index. This is used to keep only elements that have a given
---   index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
---   shape `(6,)` tensor. This is encoded in `begin` and `end` and
---   `shrink_axis_mask`.
--- 
--- Each conceptual range specification is encoded in the op's arguments. This
--- encoding is best understood by considering a non-trivial example. In
--- particular,
--- `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
--- 
--- ```prettyprint
--- begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
--- end = [2, 4, x, x, -3, x]
--- strides = [1, 1, x, x, -1, 1]
--- begin_mask = 1<<4 | 1 << 5 = 48
--- end_mask = 1<<5 = 32
--- ellipsis_mask = 1<<3 = 8
--- new_axis_mask = 1<<2 = 4
--- shrink_axis_mask = 1<<0
--- ```
--- 
--- In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
--- the slice becomes (2, 1, 5, 5, 2, 5).
--- Let us walk step by step through each argument specification.
--- 
--- 1.  The first argument in the example slice is turned into `begin = 1` and
--- `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
--- also set the appropriate bit in `shrink_axis_mask`.
--- 
--- 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
--- zero bits contributed.
--- 
--- 3. None is a synonym for `tf.newaxis`. This means a size 1 dimension is
--- inserted in the final shape. Dummy values are contributed to begin,
--- end and stride, while the new_axis_mask bit is set.
--- 
--- 4. `...` grabs the full ranges from as many dimensions as needed to
--- fully specify a slice for every dimension of the input shape.
--- 
--- 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
--- with a dimension that has shape `s` is converted to a positive index
--- `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
--- is done internally so begin, end and strides receive x, -3, and -1.
--- The appropriate begin_mask bit is set to indicate the start range is the
--- full range (ignoring the x).
--- 
--- 6. `:` indicates that the entire contents of the corresponding dimension
--- is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
--- receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
--- `end_mask` are also set.
--- 
--- *Requirements*:
---   `0 != strides[i] for i in [0, m)`
---   `ellipsis_mask must be a power of two (only one ellipsis)`
-stridedSlice :: forall v1 v2 v3 v4 t index . (TensorType t, TensorType index,
-                                              OneOf '[Data.Int.Int32,
-                                                      Data.Int.Int64] index) =>
-                Tensor v1 t -- ^ __input__
-                -> Tensor v2 index -- ^ __begin__: `begin[k]` specifies the offset into the `k`th range specification.
-                                   -- The exact dimension this corresponds to will be determined by context.
-                                   -- Out-of-bounds values will be silently clamped. If the `k`th bit of
-                                   -- `begin_mask` is set then `begin[k]` is ignored and the full range of the
-                                   -- appropriate dimension is used instead. Negative values cause indexing
-                                   -- to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
-                -> Tensor v3 index -- ^ __end__: `end[i]` is like `begin` with the exception that `end_mask` is
-                                   -- used to determine full ranges.
-                -> Tensor v4 index -- ^ __strides__: `strides[i]` specifies the increment in the `i`th specification
-                                   -- after extracting a given element. Negative indices will reverse
-                                   -- the original order. Out-of-range values are
-                                   -- clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
-                -> Tensor Value t -- ^ __output__
-stridedSlice input begin end strides | eqLengthGuard [] =
-    buildOp (opDef "StridedSlice"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Index" .~ tensorType (undefined :: index))
-        input begin end strides
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Index"
-  type: "type"
-}
-attr {
-  default_value { i: 0 }
-  description: "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. At runtime\nbegin[i] will be replaced with `[0, n-1) if `stride[i] > 0` or\n`[-1, n-1]` if `stride[i] < 0`"
-  name: "begin_mask"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "analogous to `begin_mask`"
-  name: "end_mask"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1.\nIf `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`\nis provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\nimplicitly creates as many range specifications as necessary to fully\nspecify the sliced range for every dimension. For example for a 4-dimensional\ntensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`."
-  name: "ellipsis_mask"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "a bitmask where bit `i` being 1 means the `i`th\nspecification creates a new shape 1 dimension. For example\n`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor."
-  name: "new_axis_mask"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "a bitmask where bit `i` implies that the `i`th\nspecification should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. For example in\npython one might do `foo[:, 3, :]` which would result in\n`shrink_axis_mask` being 2."
-  name: "shrink_axis_mask"
-  type: "int"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg {
-  description: "`begin[k]` specifies the offset into the `k`th range specification.\nThe exact dimension this corresponds to will be determined by context.\nOut-of-bounds values will be silently clamped. If the `k`th bit of\n`begin_mask` is set then `begin[k]` is ignored and the full range of the\nappropriate dimension is used instead. Negative values cause indexing\nto start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`."
-  name: "begin"
-  type_attr: "Index"
-}
-input_arg {
-  description: "`end[i]` is like `begin` with the exception that `end_mask` is\nused to determine full ranges."
-  name: "end"
-  type_attr: "Index"
-}
-input_arg {
-  description: "`strides[i]` specifies the increment in the `i`th specification\nafter extracting a given element. Negative indices will reverse\nthe original order. Out-of-range values are\nclamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`"
-  name: "strides"
-  type_attr: "Index"
-}
-output_arg { name: "output" type_attr: "T" }
--}
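-
--- A minimal usage sketch (illustrative; assumes @constant@/@vector@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'). The
--- mask attributes all default to 0 here:
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Rows 1..2 of a 3x2 tensor: begin [1,0], end [3,2], strides [1,1].
--- -- Expected result: [3, 4, 5, 6] (the flattened 2x2 slice).
--- stridedSliceExample :: IO (V.Vector Float)
--- stridedSliceExample = TF.runSession $ do
---     let t = TF.constant (TF.Shape [3, 2]) [1, 2, 3, 4, 5, 6 :: Float]
---     TF.run (CoreOps.stridedSlice t (TF.vector [1, 0 :: Int32])
---                                    (TF.vector [3, 2 :: Int32])
---                                    (TF.vector [1, 1 :: Int32]))
--- ```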
-
--- | Return a slice from 'input'.
---
--- The output tensor is a tensor with dimensions described by 'size'
--- whose values are extracted from 'input' starting at the offsets in
--- 'begin'.
--- 
--- *Requirements*:
---   0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
-slice :: forall v1 v2 v3 t index . (TensorType t, TensorType index,
-                                    OneOf '[Data.Int.Int32,
-                                            Data.Int.Int64] index) =>
-         Tensor v1 t -- ^ __input__
-         -> Tensor v2 index -- ^ __begin__: begin[i] specifies the offset into the 'i'th dimension of
-                            -- 'input' to slice from.
-         -> Tensor v3 index -- ^ __size__: size[i] specifies the number of elements of the 'i'th dimension
-                            -- of 'input' to slice. If size[i] is -1, all remaining elements in dimension
-                            -- i are included in the slice (i.e. this is equivalent to setting
-                            -- size[i] = input.dim_size(i) - begin[i]).
-         -> Tensor Value t -- ^ __output__
-slice input begin size | eqLengthGuard [] =
-    buildOp (opDef "Slice"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Index" .~ tensorType (undefined :: index))
-        input begin size
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Index"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg {
-  description: "begin[i] specifies the offset into the \'i\'th dimension of\n\'input\' to slice from."
-  name: "begin"
-  type_attr: "Index"
-}
-input_arg {
-  description: "size[i] specifies the number of elements of the \'i\'th dimension\nof \'input\' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i])."
-  name: "size"
-  type_attr: "Index"
-}
-output_arg { name: "output" type_attr: "T" }
--}
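-
--- A minimal usage sketch (illustrative; assumes @constant@/@vector@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- begin [0, 1], size [2, 2] on [[1,2,3],[4,5,6]]; expected result:
--- -- [2, 3, 5, 6] (the flattened [[2,3],[5,6]]).
--- sliceExample :: IO (V.Vector Float)
--- sliceExample = TF.runSession $ do
---     let t = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
---     TF.run (CoreOps.slice t (TF.vector [0, 1 :: Int32])
---                             (TF.vector [2, 2 :: Int32]))
--- ```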
-
--- | Computes a 2D convolution given quantized 4D input and filter tensors.
---
--- The inputs are quantized tensors where the lowest value represents the real
--- number given by the associated minimum, and the highest represents the
--- associated maximum. This means that the quantized output can only be
--- interpreted in the same way, by taking the returned minimum and maximum
--- values into account.
-quantizedConv2D :: forall v1 v2 v3 v4 v5 v6 tinput tfilter
-                   out_type . (TensorType tinput, OneOf '[Data.Int.Int16,
-                                                          Data.Int.Int32,
-                                                          Data.Word.Word16,
-                                                          Data.Word.Word8] tinput,
-                               TensorType tfilter, OneOf '[Data.Int.Int16,
-                                                           Data.Int.Int32,
-                                                           Data.Word.Word16,
-                                                           Data.Word.Word8] tfilter,
-                               TensorType out_type, OneOf '[Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8] out_type) =>
-                   Tensor v1 tinput -- ^ __input__
-                   -> Tensor v2 tfilter -- ^ __filter__: filter's input_depth dimension must match input's depth dimensions.
-                   -> Tensor v3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
-                   -> Tensor v4 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
-                   -> Tensor v5 Float -- ^ __min_filter__: The float value that the lowest quantized filter value represents.
-                   -> Tensor v6 Float -- ^ __max_filter__: The float value that the highest quantized filter value represents.
-                   -> (Tensor Value out_type, Tensor Value Float,
-                       Tensor Value Float)
-                   -- ^ (__output__, __min_output__, __max_output__)
-                   --
-                   -- * __output__
-                   --
-                   -- * __min_output__: The float value that the lowest quantized output value represents.
-                   --
-                   -- * __max_output__: The float value that the highest quantized output value represents.
-quantizedConv2D input filter min_input max_input min_filter
-                max_filter | eqLengthGuard [] =
-    buildOp (opDef "QuantizedConv2D"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput)
-             & opAttr "Tfilter" .~ tensorType (undefined :: tfilter)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        input filter min_input max_input min_filter max_filter
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "Tinput"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "Tfilter"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  default_value { type: DT_QINT32 }
-  name: "out_type"
-  type: "type"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\ntensor."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg { name: "input" type_attr: "Tinput" }
-input_arg {
-  description: "filter\'s input_depth dimension must match input\'s depth dimensions."
-  name: "filter"
-  type_attr: "Tfilter"
-}
-input_arg {
-  description: "The float value that the lowest quantized input value represents."
-  name: "min_input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized input value represents."
-  name: "max_input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the lowest quantized filter value represents."
-  name: "min_filter"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized filter value represents."
-  name: "max_filter"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type_attr: "out_type" }
-output_arg {
-  description: "The float value that the lowest quantized output value represents."
-  name: "min_output"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized output value represents."
-  name: "max_output"
-  type: DT_FLOAT
-}
--}
-
--- | Computes rectified linear 6 gradients for a Relu6 operation.
-
-relu6Grad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                     Data.Int.Int32,
-                                                     Data.Int.Int64,
-                                                     Data.Int.Int8,
-                                                     Data.Word.Word16,
-                                                     Data.Word.Word8, Double,
-                                                     Float] t) =>
-             Tensor v1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu6 operation.
-             -> Tensor v2 t -- ^ __features__: The features passed as input to the corresponding Relu6 operation.
-             -> Tensor Value t -- ^ __backprops__: The gradients:
-             -- `gradients * (features > 0) * (features < 6)`.
-relu6Grad gradients features | eqLengthGuard [] =
-    buildOp (opDef "Relu6Grad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        gradients features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The backpropagated gradients to the corresponding Relu6 operation."
-  name: "gradients"
-  type_attr: "T"
-}
-input_arg {
-  description: "The features passed as input to the corresponding Relu6 operation."
-  name: "features"
-  type_attr: "T"
-}
-output_arg {
-  description: "The gradients:\n`gradients * (features > 0) * (features < 6)`."
-  name: "backprops"
-  type_attr: "T"
-}
--}
-
--- | Computes gradients of the average pooling function.
-
-avgPoolGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                       Float] t) =>
-               Tensor v1 Data.Int.Int32 -- ^ __orig_input_shape__: 1-D.  Shape of the original input to `avg_pool`.
-               -> Tensor v2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
-                              -- the output of `avg_pool`.
-               -> Tensor Value t -- ^ __output__: 4-D.  Gradients w.r.t. the input of `avg_pool`.
-avgPoolGrad orig_input_shape grad | eqLengthGuard [] =
-    buildOp (opDef "AvgPoolGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        orig_input_shape grad
-{-
-attr {
-  description: "The size of the sliding window for each dimension of the input."
-  has_minimum: true
-  minimum: 4
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
-  name: "data_format"
-  type: "string"
-}
-attr {
-  allowed_values {
-    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "1-D.  Shape of the original input to `avg_pool`."
-  name: "orig_input_shape"
-  type: DT_INT32
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.\nthe output of `avg_pool`."
-  name: "grad"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D.  Gradients w.r.t. the input of `avg_pool`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Split elements of `input` based on `delimiter` into a `SparseTensor`.
---
--- Let N be the size of source (typically N will be the batch size). Split each
--- element of `input` based on `delimiter` and return a `SparseTensor`
--- containing the split tokens. Empty tokens are ignored.
--- 
--- `delimiter` can be empty or a single-byte character. If `delimiter` is an empty
---  string, each element of `input` is split into individual single-byte character
---  strings, including splitting of UTF-8 multibyte sequences.
--- 
--- For example:
---   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
---   will be
--- 
---   indices = [0, 0;
---              0, 1;
---              1, 0;
---              1, 1;
---              1, 2]
---   shape = [2, 3]
---   values = ['hello', 'world', 'a', 'b', 'c']
-stringSplit :: Tensor v1 Data.ByteString.ByteString -- ^ __input__: 1-D. Strings to split.
-               -> Tensor v2 Data.ByteString.ByteString -- ^ __delimiter__: 0-D. Delimiter character, or empty string.
-               -> (Tensor Value Data.Int.Int64,
-                   Tensor Value Data.ByteString.ByteString,
-                   Tensor Value Data.Int.Int64)
-               -- ^ (__indices__, __values__, __shape__)
-               --
-               -- * __indices__: A dense matrix of int64 representing the indices of the sparse tensor.
-               --
-               -- * __values__: A vector of strings corresponding to the split values.
-               --
-               -- * __shape__: a length-2 vector of int64 representing the shape of the sparse
-               -- tensor, where the first value is N and the second value is the maximum number
-               -- of tokens in a single input entry.
-stringSplit input delimiter | eqLengthGuard [] =
-    buildOp (opDef "StringSplit")
-        input delimiter
-{-
-input_arg {
-  description: "1-D. Strings to split." name: "input" type: DT_STRING
-}
-input_arg {
-  description: "0-D. Delimiter character, or empty string."
-  name: "delimiter"
-  type: DT_STRING
-}
-output_arg {
-  description: "A dense matrix of int64 representing the indices of the sparse tensor."
-  name: "indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of strings corresponding to the split values."
-  name: "values"
-  type: DT_STRING
-}
-output_arg {
-  description: "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry."
-  name: "shape"
-  type: DT_INT64
-}
--}
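-
--- A minimal usage sketch (illustrative; assumes @vector@/@scalar@ from
--- 'TensorFlow.Ops', @runSession@/@run@ from 'TensorFlow.Core', and a
--- Fetchable instance for 3-tuples):
---
--- ```prettyprint
--- import Data.Int (Int64)
--- import qualified Data.ByteString.Char8 as B8
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Mirrors the example above: splits "hello world" and "a b c" on ' '.
--- stringSplitExample :: IO (V.Vector Int64, V.Vector B8.ByteString, V.Vector Int64)
--- stringSplitExample = TF.runSession $ do
---     let input = TF.vector (map B8.pack ["hello world", "a b c"])
---         delim = TF.scalar (B8.pack " ")
---     TF.run (CoreOps.stringSplit input delim)
--- ```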
-
--- | Returns the rank of a tensor.
---
--- This operation returns an integer representing the rank of `input`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
--- # shape of tensor 't' is [2, 2, 3]
--- rank(t) ==> 3
--- ```
--- 
--- **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
--- of a tensor is the number of indices required to uniquely select each element
--- of the tensor. Rank is also known as "order", "degree", or "ndims."
-rank :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __input__
-        -> Tensor Value Data.Int.Int32 -- ^ __output__
-rank input | eqLengthGuard [] =
-    buildOp (opDef "Rank"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type: DT_INT32 }
--}
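-
--- A minimal usage sketch (illustrative; assumes @constant@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- The rank of a [2, 2, 3] tensor is 3; the 0-D output is fetched
--- -- here as a one-element vector.
--- rankExample :: IO (V.Vector Int32)
--- rankExample = TF.runSession $ do
---     let t = TF.constant (TF.Shape [2, 2, 3]) (replicate 12 (0 :: Float))
---     TF.run (CoreOps.rank t)
--- ```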
-
--- | Computes the reciprocal of x element-wise.
---
--- I.e., \\(y = 1 / x\\).
-reciprocal :: forall v1 t . (TensorType t,
-                             OneOf '[(Data.Complex.Complex Double),
-                                     (Data.Complex.Complex Float),
-                                     Data.Int.Int32, Data.Int.Int64,
-                                     Data.Word.Word16, Double, Float] t) =>
-              Tensor v1 t -- ^ __x__
-              -> Tensor Value t -- ^ __y__
-reciprocal x | eqLengthGuard [] =
-    buildOp (opDef "Reciprocal"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Reverses variable length slices.
---
--- This op first slices `input` along the dimension `batch_dim`, and for each
--- slice `i`, reverses the first `seq_lengths[i]` elements along
--- the dimension `seq_dim`.
--- 
--- The elements of `seq_lengths` must obey `seq_lengths[i] < input.dims[seq_dim]`,
--- and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
--- 
--- The output slice `i` along dimension `batch_dim` is then given by input
--- slice `i`, with the first `seq_lengths[i]` slices along dimension
--- `seq_dim` reversed.
--- 
--- For example:
--- 
--- ```prettyprint
--- # Given this:
--- batch_dim = 0
--- seq_dim = 1
--- input.dims = (4, 8, ...)
--- seq_lengths = [7, 2, 3, 5]
--- 
--- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
--- output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
--- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
--- output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
--- output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
--- 
--- # while entries past seq_lens are copied through:
--- output[0, 7:, :, ...] = input[0, 7:, :, ...]
--- output[1, 2:, :, ...] = input[1, 2:, :, ...]
--- output[2, 3:, :, ...] = input[2, 3:, :, ...]
--- output[3, 5:, :, ...] = input[3, 5:, :, ...]
--- ```
--- 
--- In contrast, if:
--- 
--- ```prettyprint
--- # Given this:
--- batch_dim = 2
--- seq_dim = 0
--- input.dims = (8, ?, 4, ...)
--- seq_lengths = [7, 2, 3, 5]
--- 
--- # then slices of input are reversed on seq_dim, but only up to seq_lengths:
--- output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
--- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
--- output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
--- output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
--- 
--- # while entries past seq_lens are copied through:
--- output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
--- output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
--- output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
--- output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
--- ```
-reverseSequence :: forall v1 v2 t tlen . (TensorType t, TensorType tlen,
-                                          OneOf '[Data.Int.Int32,
-                                                  Data.Int.Int64] tlen) =>
-                   Data.Int.Int64 -- ^ __seq_dim__: The dimension which is partially reversed.
-                   -> Tensor v1 t -- ^ __input__: The input to reverse.
-                   -> Tensor v2 tlen -- ^ __seq_lengths__: 1-D with length `input.dims(batch_dim)` and
-                                     -- `max(seq_lengths) < input.dims(seq_dim)`
-                   -> Tensor Value t -- ^ __output__: The partially reversed input. It has the same shape as `input`.
-reverseSequence seq_dim input seq_lengths | eqLengthGuard [] =
-    buildOp (opDef "ReverseSequence"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tlen" .~ tensorType (undefined :: tlen)
-             & opAttr "seq_dim" .~ seq_dim)
-        input seq_lengths
-{-
-attr {
-  description: "The dimension which is partially reversed."
-  name: "seq_dim"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "The dimension along which reversal is performed."
-  name: "batch_dim"
-  type: "int"
-}
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT64 }
-  name: "Tlen"
-  type: "type"
-}
-input_arg {
-  description: "The input to reverse." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) < input.dims(seq_dim)`"
-  name: "seq_lengths"
-  type_attr: "Tlen"
-}
-output_arg {
-  description: "The partially reversed input. It has the same shape as `input`."
-  name: "output"
-  type_attr: "T"
-}
--}
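-
--- A minimal usage sketch (illustrative; assumes @constant@/@vector@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int64)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- With seq_dim = 1 and seq_lengths = [3, 2], the first 3 elements of
--- -- row 0 and the first 2 of row 1 are reversed; expected result:
--- -- [3, 2, 1, 4, 6, 5, 7, 8].
--- reverseSequenceExample :: IO (V.Vector Float)
--- reverseSequenceExample = TF.runSession $ do
---     let input = TF.constant (TF.Shape [2, 4]) [1, 2, 3, 4, 5, 6, 7, 8 :: Float]
---         lens  = TF.vector [3, 2 :: Int64]
---     TF.run (CoreOps.reverseSequence 1 input lens)
--- ```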
-
--- | The backward operation for "BiasAdd" on the "bias" tensor.
---
--- It accumulates all the values from out_backprop into the feature dimension.
--- For NHWC data format, the feature dimension is the last. For NCHW data format,
--- the feature dimension is the third-to-last.
-biasAddGrad :: forall v1 t . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float),
-                                      Data.Int.Int16, Data.Int.Int32,
-                                      Data.Int.Int64, Data.Int.Int8,
-                                      Data.Word.Word16, Data.Word.Word8, Double,
-                                      Float] t) =>
-               Tensor v1 t -- ^ __out_backprop__: Any number of dimensions.
-               -> Tensor Value t -- ^ __output__: 1-D with size the feature dimension of `out_backprop`.
-biasAddGrad out_backprop | eqLengthGuard [] =
-    buildOp (opDef "BiasAddGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        out_backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
-  name: "data_format"
-  type: "string"
-}
-input_arg {
-  description: "Any number of dimensions."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D with size the feature dimension of `out_backprop`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
---
--- A `SparseTensor` is represented by three tensors: `sparse_indices`,
--- `sparse_values`, and `sparse_shape`.
--- 
--- This operator takes the given `SparseTensor` and adds it to a container
--- object (a `SparseTensorsMap`).  A unique key within this container is generated
--- in the form of an `int64`, and this is the value that is returned.
--- 
--- The `SparseTensor` can then be read out as part of a minibatch by passing
--- the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure
--- the correct `SparseTensorsMap` is accessed, ensure that the same
--- `container` and `shared_name` are passed to that Op.  If no `shared_name`
--- is provided here, instead use the *name* of the Operation created by calling
--- `AddSparseToTensorsMap` as the `shared_name` passed to
--- `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
-addSparseToTensorsMap :: forall v1 v2 v3 t . (TensorType t) =>
-                         Tensor v1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.
-                         -> Tensor v2 t -- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.
-                         -> Tensor v3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.
-                         -> Build (Tensor Value Data.Int.Int64) -- ^ __sparse_handle__: 0-D.  The handle of the `SparseTensor` now stored in the
-                         -- `SparseTensorsMap`.
-addSparseToTensorsMap sparse_indices sparse_values
-                      sparse_shape | eqLengthGuard [] =
-    buildOp (opDef "AddSparseToTensorsMap"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sparse_indices sparse_values sparse_shape
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { s: "" }
-  description: "The container name for the `SparseTensorsMap` created by this op."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
-  name: "shared_name"
-  type: "string"
-}
-input_arg {
-  description: "2-D.  The `indices` of the `SparseTensor`."
-  name: "sparse_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  The `values` of the `SparseTensor`."
-  name: "sparse_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the `SparseTensor`."
-  name: "sparse_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "0-D.  The handle of the `SparseTensor` now stored in the\n`SparseTensorsMap`."
-  name: "sparse_handle"
-  type: DT_INT64
-}
--}
-
--- | Computes tan of x element-wise.
-
-tan :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Data.Int.Int32, Data.Int.Int64,
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-tan x | eqLengthGuard [] =
-    buildOp (opDef "Tan"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes the sum of elements across dimensions of a SparseTensor.
---
--- This Op takes a SparseTensor and is the sparse counterpart to
--- `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
--- SparseTensor.
--- 
--- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
--- with length 1.
--- 
--- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
--- with a single element is returned.  Additionally, the axes can be negative,
--- in which case they are interpreted according to Python's indexing rules.
-sparseReduceSumSparse :: forall v1 v2 v3 v4 t . (TensorType t,
-                                                 OneOf '[(Data.Complex.Complex Double),
-                                                         (Data.Complex.Complex Float),
-                                                         Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Int.Int64,
-                                                         Data.Int.Int8,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8,
-                                                         Double, Float] t) =>
-                         Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                                  -- SparseTensor, possibly not in canonical ordering.
-                         -> Tensor v2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
-                         -> Tensor v3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
-                         -> Tensor v4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
-                         -> (Tensor Value Data.Int.Int64, Tensor Value t,
-                             Tensor Value Data.Int.Int64)
-                         -- ^ (__output_indices__, __output_values__, __output_shape__)
-                         --
-                         -- * __output_indices__
-                         --
-                         -- * __output_values__
-                         --
-                         -- * __output_shape__
-sparseReduceSumSparse input_indices input_values input_shape
-                      reduction_axes | eqLengthGuard [] =
-    buildOp (opDef "SparseReduceSumSparse"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input_indices input_values input_shape reduction_axes
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
-  name: "input_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
-  name: "input_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "input_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  Length-`K` vector containing the reduction axes."
-  name: "reduction_axes"
-  type: DT_INT32
-}
-output_arg { name: "output_indices" type: DT_INT64 }
-output_arg { name: "output_values" type_attr: "T" }
-output_arg { name: "output_shape" type: DT_INT64 }
--}
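-
--- A minimal usage sketch (illustrative; assumes @constant@/@vector@ from
--- 'TensorFlow.Ops', @runSession@/@run@ from 'TensorFlow.Core', and a
--- Fetchable instance for 3-tuples):
---
--- ```prettyprint
--- import Data.Int (Int32, Int64)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Sums the 2x2 sparse tensor [[10, 0], [0, 20]] over axis 1; expected:
--- -- indices [[0], [1]], values [10, 20], shape [2].
--- sparseSumExample :: IO (V.Vector Int64, V.Vector Float, V.Vector Int64)
--- sparseSumExample = TF.runSession $ do
---     let indices = TF.constant (TF.Shape [2, 2]) [0, 0, 1, 1 :: Int64]
---         values  = TF.vector [10, 20 :: Float]
---         dims    = TF.vector [2, 2 :: Int64]
---         axes    = TF.vector [1 :: Int32]
---     TF.run (CoreOps.sparseReduceSumSparse indices values dims axes)
--- ```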
-
--- | Returns shape of tensors.
---
--- This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
-shapeN :: forall v1 t out_type . (TensorType t, TensorType out_type,
-                                  OneOf '[Data.Int.Int32,
-                                          Data.Int.Int64] out_type) =>
-          [Tensor v1 t] -- ^ __input__
-          -> [Tensor Value out_type] -- ^ __output__
-shapeN input | eqLengthGuard [("N", [("input", length input)])] =
-    buildListOp [n] (opDef "ShapeN"
-                     & opAttr "T" .~ tensorType (undefined :: t)
-                     & opAttr "out_type" .~ tensorType (undefined :: out_type)
-                     & opAttr "N" .~ n)
-        input
-  where
-    n = fromIntegral (length input) :: Int64
-{-
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "input" number_attr: "N" type_attr: "T" }
-output_arg {
-  name: "output" number_attr: "N" type_attr: "out_type"
-}
--}
-
--- | Returns the shape of a tensor.
---
--- This operation returns a 1-D integer tensor representing the shape of `input`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
--- shape(t) ==> [2, 2, 3]
--- ```
-shape :: forall v1 t out_type . (TensorType t, TensorType out_type,
-                                 OneOf '[Data.Int.Int32,
-                                         Data.Int.Int64] out_type) =>
-         Tensor v1 t -- ^ __input__
-         -> Tensor Value out_type -- ^ __output__
-shape input | eqLengthGuard [] =
-    buildOp (opDef "Shape"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "out_type" }
--}
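-
--- A minimal usage sketch (illustrative; assumes @constant@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Mirrors the example above; expected result: [2, 2, 3].
--- shapeExample :: IO (V.Vector Int32)
--- shapeExample = TF.runSession $ do
---     let t = TF.constant (TF.Shape [2, 2, 3]) (replicate 12 (0 :: Float))
---     TF.run (CoreOps.shape t)
--- ```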
-
--- | Finds unique elements in a 1-D tensor.
---
--- This operation returns a tensor `y` containing all of the unique elements of `x`
--- sorted in the same order that they occur in `x`. This operation also returns a
--- tensor `idx` the same size as `x` that contains the index of each value of `x`
--- in the unique output `y`. In other words:
--- 
--- `y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
--- 
--- For example:
--- 
--- ```prettyprint
--- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
--- y, idx = unique(x)
--- y ==> [1, 2, 4, 7, 8]
--- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
--- ```
-unique :: forall v1 t out_idx . (TensorType t, TensorType out_idx,
-                                 OneOf '[Data.Int.Int32,
-                                         Data.Int.Int64] out_idx) =>
-          Tensor v1 t -- ^ __x__: 1-D.
-          -> (Tensor Value t, Tensor Value out_idx) -- ^ (__y__, __idx__)
-          --
-          -- * __y__: 1-D.
-          --
-          -- * __idx__: 1-D.
-unique x | eqLengthGuard [] =
-    buildOp (opDef "Unique"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "out_idx" .~ tensorType (undefined :: out_idx))
-        x
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "out_idx"
-  type: "type"
-}
-input_arg { description: "1-D." name: "x" type_attr: "T" }
-output_arg { description: "1-D." name: "y" type_attr: "T" }
-output_arg { description: "1-D." name: "idx" type_attr: "out_idx" }
--}
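-
--- A minimal usage sketch (illustrative; assumes @vector@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Mirrors the example above: y ==> [1, 2, 4, 7, 8] and
--- -- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4].
--- uniqueExample :: IO (V.Vector Float, V.Vector Int32)
--- uniqueExample = TF.runSession $ do
---     let x = TF.vector [1, 1, 2, 4, 4, 4, 7, 8, 8 :: Float]
---     TF.run (CoreOps.unique x)
--- ```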
-
--- | Outputs random values from a truncated normal distribution.
---
--- The generated values follow a normal distribution with mean 0 and standard
--- deviation 1, except that values whose magnitude is more than 2 standard
--- deviations from the mean are dropped and re-picked.
-truncatedNormal :: forall v1 dtype t . (TensorType dtype,
-                                        OneOf '[Data.Word.Word16, Double,
-                                                Float] dtype, TensorType t,
-                                        OneOf '[Data.Int.Int32,
-                                                Data.Int.Int64] t) =>
-                   Tensor v1 t -- ^ __shape__: The shape of the output tensor.
-                   -> Build (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random truncated normal
-                   -- values.
-truncatedNormal shape | eqLengthGuard [] =
-    buildOp (opDef "TruncatedNormal"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        shape
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  description: "The type of the output."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The shape of the output tensor."
-  name: "shape"
-  type_attr: "T"
-}
-output_arg {
-  description: "A tensor of the specified shape filled with random truncated normal\nvalues."
-  name: "output"
-  type_attr: "dtype"
-}
--}
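-
--- A minimal usage sketch (illustrative; assumes @vector@ from
--- 'TensorFlow.Ops' and @build@/@runSession@/@run@ from 'TensorFlow.Core';
--- the op is stateful, hence the 'Build' return type):
---
--- ```prettyprint
--- import Data.Int (Int64)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Draws 6 values from the truncated normal distribution.
--- truncatedNormalExample :: IO (V.Vector Float)
--- truncatedNormalExample = TF.runSession $ do
---     ns <- TF.build (CoreOps.truncatedNormal (TF.vector [6 :: Int64]))
---     TF.run (ns :: TF.Tensor TF.Value Float)
--- ```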
-
--- | Computes the inverse permutation of a tensor.
---
--- This operation computes the inverse of an index permutation. It takes a 1-D
--- integer tensor `x`, which represents the indices of a zero-based array, and
--- swaps each value with its index position. In other words, for an output tensor
--- `y` and an input tensor `x`, this operation computes the following:
--- 
--- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
--- 
--- The values must include 0. There can be no duplicate values or negative values.
--- 
--- For example:
--- 
--- ```prettyprint
--- # tensor `x` is [3, 4, 0, 2, 1]
--- invert_permutation(x) ==> [2, 4, 3, 0, 1]
--- ```
-invertPermutation :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                          Data.Int.Int64] t) =>
-                     Tensor v1 t -- ^ __x__: 1-D.
-                     -> Tensor Value t -- ^ __y__: 1-D.
-invertPermutation x | eqLengthGuard [] =
-    buildOp (opDef "InvertPermutation"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "T"
-  type: "type"
-}
-input_arg { description: "1-D." name: "x" type_attr: "T" }
-output_arg { description: "1-D." name: "y" type_attr: "T" }
--}
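-
--- A minimal usage sketch (illustrative; assumes @vector@ from
--- 'TensorFlow.Ops' and @runSession@/@run@ from 'TensorFlow.Core'):
---
--- ```prettyprint
--- import Data.Int (Int32)
--- import qualified Data.Vector as V
--- import qualified TensorFlow.Core as TF
--- import qualified TensorFlow.GenOps.Core as CoreOps
--- import qualified TensorFlow.Ops as TF
---
--- -- Mirrors the example above; expected result: [2, 4, 3, 0, 1].
--- invertPermutationExample :: IO (V.Vector Int32)
--- invertPermutationExample = TF.runSession $ do
---     let x = TF.vector [3, 4, 0, 2, 1 :: Int32]
---     TF.run (CoreOps.invertPermutation x)
--- ```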
-
--- | Checks a tensor for NaN and Inf values.
---
--- When run, reports an `InvalidArgument` error if `tensor` has any values
--- that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
-checkNumerics :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                      Float] t) =>
-                 Tensor v1 t -- ^ __tensor__
-                 -> Tensor Value t -- ^ __output__
-checkNumerics tensor | eqLengthGuard [] =
-    buildOp (opDef "CheckNumerics"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        tensor
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "Prefix of the error message."
-  name: "message"
-  type: "string"
-}
-input_arg { name: "tensor" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Generates labels for candidate sampling with a uniform distribution.
---
--- See explanations of candidate sampling and the data formats at
--- go/candidate-sampling.
--- 
--- For each batch, this op picks a single set of sampled candidate labels.
--- 
--- The advantages of sampling candidates per-batch are simplicity and the
--- possibility of efficient dense matrix multiplication. The disadvantage is that
--- the sampled candidates must be chosen independently of the context and of the
--- true labels.
-uniformCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
-                           -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
-                           -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
-                           -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
-                                   -- candidates in a batch are unique. This requires some approximation to
-                                   -- estimate the post-rejection sampling probabilities.
-                           -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
-                                                       -- IDs of the num_true target_classes in the corresponding original label.
-                           -> (Tensor Value Data.Int.Int64, Tensor Value Float,
-                               Tensor Value Float)
-                           -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
-                           --
-                           -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
-                           -- the ID of a sampled candidate.
-                           --
-                           -- * __true_expected_count__: A batch_size * num_true matrix, representing
-                           -- the number of times each candidate is expected to occur in a batch
-                           -- of sampled candidates. If unique=true, then this is a probability.
-                           --
-                           -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
-                           -- candidate representing the number of times the candidate is expected
-                           -- to occur in a batch of sampled candidates.  If unique=true, then this is a
-                           -- probability.
-uniformCandidateSampler num_sampled num_true range_max unique
-                        true_classes | eqLengthGuard [] =
-    buildOp (opDef "UniformCandidateSampler"
-             & opAttr "num_sampled" .~ num_sampled
-             & opAttr "num_true" .~ num_true
-             & opAttr "range_max" .~ range_max
-             & opAttr "unique" .~ unique)
-        true_classes
-{-
-attr {
-  description: "Number of true labels per context."
-  has_minimum: true
-  minimum: 1
-  name: "num_true"
-  type: "int"
-}
-attr {
-  description: "Number of candidates to randomly sample per batch."
-  has_minimum: true
-  minimum: 1
-  name: "num_sampled"
-  type: "int"
-}
-attr {
-  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
-  name: "unique"
-  type: "bool"
-}
-attr {
-  description: "The sampler will sample integers from the interval [0, range_max)."
-  has_minimum: true
-  minimum: 1
-  name: "range_max"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
-  name: "true_classes"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
-  name: "sampled_candidates"
-  type: DT_INT64
-}
-output_arg {
-  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
-  name: "true_expected_count"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
-  name: "sampled_expected_count"
-  type: DT_FLOAT
-}
--}
-
--- | Gather slices from `params` according to `indices`.
---
--- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
--- Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
--- 
--- ```python
---     # Scalar indices
---     output[:, ..., :] = params[indices, :, ..., :]
--- 
---     # Vector indices
---     output[i, :, ..., :] = params[indices[i], :, ..., :]
--- 
---     # Higher rank indices
---     output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
--- ```
--- 
--- If `indices` is a permutation and `len(indices) == params.shape[0]` then
--- this operation will permute `params` accordingly.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/Gather.png" alt>
--- </div>
-gather :: forall v1 v2 tparams tindices . (TensorType tparams,
-                                           TensorType tindices,
-                                           OneOf '[Data.Int.Int32,
-                                                   Data.Int.Int64] tindices) =>
-          Tensor v1 tparams -- ^ __params__
-          -> Tensor v2 tindices -- ^ __indices__
-          -> Tensor Value tparams -- ^ __output__
-gather params indices | eqLengthGuard [] =
-    buildOp (opDef "Gather"
-             & opAttr "Tparams" .~ tensorType (undefined :: tparams)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        params indices
-{-
-attr {
-  default_value { b: true } name: "validate_indices" type: "bool"
-}
-attr { name: "Tparams" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "params" type_attr: "Tparams" }
-input_arg { name: "indices" type_attr: "Tindices" }
-output_arg { name: "output" type_attr: "Tparams" }
--}
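-
--- A list-based sketch of the vector-indices case (standalone illustration
--- only; `gatherModel` is a hypothetical name, not the generated wrapper):
--- each output row is the `params` row selected by the corresponding index.
---
--- ```haskell
--- gatherModel :: [[a]] -> [Int] -> [[a]]
--- gatherModel params indices = map (params !!) indices
---
--- -- gatherModel [[1,2],[3,4],[5,6]] [2,0] == [[5,6],[1,2]]
--- ```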
-
--- | Returns a constant tensor.
-
-const :: forall dtype . (TensorType dtype) => Tensor Value dtype -- ^ __output__
-const  | eqLengthGuard [] =
-    buildOp (opDef "Const"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        
-{-
-attr {
-  description: "Attr `value` is the tensor to return."
-  name: "value"
-  type: "tensor"
-}
-attr { name: "dtype" type: "type" }
-output_arg { name: "output" type_attr: "dtype" }
--}
-
--- | Creates a tensor filled with a scalar value.
---
--- This operation creates a tensor of shape `dims` and fills it with `value`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # Output tensor has shape [2, 3].
--- fill([2, 3], 9) ==> [[9, 9, 9]
---                      [9, 9, 9]]
--- ```
-fill :: forall v1 v2 t . (TensorType t) =>
-        Tensor v1 Data.Int.Int32 -- ^ __dims__: 1-D. Represents the shape of the output tensor.
-        -> Tensor v2 t -- ^ __value__: 0-D (scalar). Value to fill the returned tensor.
-                       -- 
-                       -- @compatibility(numpy)
-                       -- Equivalent to np.full
-                       -- @end_compatibility
-        -> Tensor Value t -- ^ __output__
-fill dims value | eqLengthGuard [] =
-    buildOp (opDef "Fill"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        dims value
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "1-D. Represents the shape of the output tensor."
-  name: "dims"
-  type: DT_INT32
-}
-input_arg {
-  description: "0-D (scalar). Value to fill the returned tensor.\n\n@compatibility(numpy)\nEquivalent to np.full\n@end_compatibility"
-  name: "value"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
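-
--- A 2-D sketch of the semantics in plain, standalone Haskell (illustration
--- only; `fill2D` is a hypothetical helper):
---
--- ```haskell
--- fill2D :: Int -> Int -> a -> [[a]]
--- fill2D rows cols v = replicate rows (replicate cols v)
---
--- -- fill2D 2 3 9 == [[9,9,9],[9,9,9]]
--- ```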
-
--- | Computes the (possibly normalized) Levenshtein Edit Distance.
---
--- The inputs are variable-length sequences provided by SparseTensors
---   (hypothesis_indices, hypothesis_values, hypothesis_shape)
--- and
---   (truth_indices, truth_values, truth_shape).
--- 
--- The inputs are:
-editDistance :: forall v1 v2 v3 v4 v5 v6 t . (TensorType t) =>
-                Tensor v1 Data.Int.Int64 -- ^ __hypothesis_indices__: The indices of the hypothesis list SparseTensor.
-                                         -- This is an N x R int64 matrix.
-                -> Tensor v2 t -- ^ __hypothesis_values__: The values of the hypothesis list SparseTensor.
-                               -- This is an N-length vector.
-                -> Tensor v3 Data.Int.Int64 -- ^ __hypothesis_shape__: The shape of the hypothesis list SparseTensor.
-                                            -- This is an R-length vector.
-                -> Tensor v4 Data.Int.Int64 -- ^ __truth_indices__: The indices of the truth list SparseTensor.
-                                            -- This is an M x R int64 matrix.
-                -> Tensor v5 t -- ^ __truth_values__: The values of the truth list SparseTensor.
-                               -- This is an M-length vector.
-                -> Tensor v6 Data.Int.Int64 -- ^ __truth_shape__: truth indices, vector.
-                -> Tensor Value Float -- ^ __output__: A dense float tensor with rank R - 1.
-                -- 
-                -- For the example input:
-                -- 
-                --     // hypothesis represents a 2x1 matrix with variable-length values:
-                --     //   (0,0) = ["a"]
-                --     //   (1,0) = ["b"]
-                --     hypothesis_indices = [[0, 0, 0],
-                --                           [1, 0, 0]]
-                --     hypothesis_values = ["a", "b"]
-                --     hypothesis_shape = [2, 1, 1]
-                -- 
-                --     // truth represents a 2x2 matrix with variable-length values:
-                --     //   (0,0) = []
-                --     //   (0,1) = ["a"]
-                --     //   (1,0) = ["b", "c"]
-                --     //   (1,1) = ["a"]
-                --     truth_indices = [[0, 1, 0],
-                --                      [1, 0, 0],
-                --                      [1, 0, 1],
-                --                      [1, 1, 0]]
-                --     truth_values = ["a", "b", "c", "a"]
-                --     truth_shape = [2, 2, 2]
-                --     normalize = true
-                -- 
-                -- The output will be:
-                -- 
-                --     // output is a 2x2 matrix with edit distances normalized by truth lengths.
-                --     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
-                --               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
-editDistance hypothesis_indices hypothesis_values hypothesis_shape truth_indices
-             truth_values truth_shape | eqLengthGuard [] =
-    buildOp (opDef "EditDistance"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        hypothesis_indices hypothesis_values hypothesis_shape truth_indices
-        truth_values truth_shape
-{-
-attr {
-  default_value { b: true }
-  description: "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:"
-  name: "normalize"
-  type: "bool"
-}
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix."
-  name: "hypothesis_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "The values of the hypothesis list SparseTensor.\nThis is an N-length vector."
-  name: "hypothesis_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector."
-  name: "hypothesis_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "The indices of the truth list SparseTensor.\nThis is an M x R int64 matrix."
-  name: "truth_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "The values of the truth list SparseTensor.\nThis is an M-length vector."
-  name: "truth_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "truth indices, vector."
-  name: "truth_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n    // hypothesis represents a 2x1 matrix with variable-length values:\n    //   (0,0) = [\"a\"]\n    //   (1,0) = [\"b\"]\n    hypothesis_indices = [[0, 0, 0],\n                          [1, 0, 0]]\n    hypothesis_values = [\"a\", \"b\"]\n    hypothesis_shape = [2, 1, 1]\n\n    // truth represents a 2x2 matrix with variable-length values:\n    //   (0,0) = []\n    //   (0,1) = [\"a\"]\n    //   (1,0) = [\"b\", \"c\"]\n    //   (1,1) = [\"a\"]\n    truth_indices = [[0, 1, 0],\n                     [1, 0, 0],\n                     [1, 0, 1],\n                     [1, 1, 0]]\n    truth_values = [\"a\", \"b\", \"c\", \"a\"]\n    truth_shape = [2, 2, 2]\n    normalize = true\n\nThe output will be:\n\n    // output is a 2x2 matrix with edit distances normalized by truth lengths.\n    output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis\n              [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis"
-  name: "output"
-  type: DT_FLOAT
-}
--}
-
--- | Reverses specific dimensions of a tensor.
---
--- Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
--- of `tensor`, this operation reverses each dimension i of `tensor` where
--- `dims[i]` is `True`.
--- 
--- `tensor` can have up to 8 dimensions. The number of dimensions
--- of `tensor` must equal the number of elements in `dims`. In other words:
--- 
--- `rank(tensor) = size(dims)`
--- 
--- For example:
--- 
--- ```prettyprint
--- # tensor 't' is [[[[ 0,  1,  2,  3],
--- #                  [ 4,  5,  6,  7],
--- #                  [ 8,  9, 10, 11]],
--- #                 [[12, 13, 14, 15],
--- #                  [16, 17, 18, 19],
--- #                  [20, 21, 22, 23]]]]
--- # tensor 't' shape is [1, 2, 3, 4]
--- 
--- # 'dims' is [False, False, False, True]
--- reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
---                         [ 7,  6,  5,  4],
---                         [11, 10,  9,  8]],
---                        [[15, 14, 13, 12],
---                         [19, 18, 17, 16],
---                         [23, 22, 21, 20]]]]
--- 
--- # 'dims' is [False, True, False, False]
--- reverse(t, dims) ==> [[[[12, 13, 14, 15],
---                         [16, 17, 18, 19],
---                         [20, 21, 22, 23]],
---                        [[ 0,  1,  2,  3],
---                         [ 4,  5,  6,  7],
---                         [ 8,  9, 10, 11]]]]
--- 
--- # 'dims' is [False, False, True, False]
--- reverse(t, dims) ==> [[[[8, 9, 10, 11],
---                         [4, 5, 6, 7],
---                         [0, 1, 2, 3]],
---                        [[20, 21, 22, 23],
---                         [16, 17, 18, 19],
---                         [12, 13, 14, 15]]]]
--- ```
-reverse :: forall v1 v2 t . (TensorType t,
-                             OneOf '[(Data.Complex.Complex Double),
-                                     (Data.Complex.Complex Float), Bool,
-                                     Data.Int.Int32, Data.Int.Int64,
-                                     Data.Int.Int8, Data.Word.Word16,
-                                     Data.Word.Word8, Double, Float] t) =>
-           Tensor v1 t -- ^ __tensor__: Up to 8-D.
-           -> Tensor v2 Bool -- ^ __dims__: 1-D. The dimensions to reverse.
-           -> Tensor Value t -- ^ __output__: The same shape as `tensor`.
-reverse tensor dims | eqLengthGuard [] =
-    buildOp (opDef "Reverse"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        tensor dims
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_BOOL
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Up to 8-D." name: "tensor" type_attr: "T"
-}
-input_arg {
-  description: "1-D. The dimensions to reverse."
-  name: "dims"
-  type: DT_BOOL
-}
-output_arg {
-  description: "The same shape as `tensor`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns a batched matrix tensor with new batched diagonal values.
---
--- Given `input` and `diagonal`, this operation returns a tensor with the
--- same shape and values as `input`, except for the main diagonal of the
--- innermost matrices.  These will be overwritten by the values in `diagonal`.
--- 
--- The output is computed as follows:
--- 
--- Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
--- `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
--- tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
--- 
---   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
---   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
-matrixSetDiag :: forall v1 v2 t . (TensorType t) =>
-                 Tensor v1 t -- ^ __input__: Rank `k+1`, where `k >= 1`.
-                 -> Tensor v2 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
-                 -> Tensor Value t -- ^ __output__: Rank `k+1`, with `output.shape = input.shape`.
-matrixSetDiag input diagonal | eqLengthGuard [] =
-    buildOp (opDef "MatrixSetDiag"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input diagonal
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "Rank `k+1`, where `k >= 1`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "Rank `k`, where `k >= 1`."
-  name: "diagonal"
-  type_attr: "T"
-}
-output_arg {
-  description: "Rank `k+1`, with `output.shape = input.shape`."
-  name: "output"
-  type_attr: "T"
-}
--}
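-
--- A single-matrix (`k == 1`) model of the two rules above, as standalone
--- Haskell (illustration only; `matrixSetDiagModel` is a hypothetical name):
---
--- ```haskell
--- matrixSetDiagModel :: [[a]] -> [a] -> [[a]]
--- matrixSetDiagModel input diagonal =
---   [ [ if m == n then diagonal !! n else x
---     | (n, x) <- zip [0 ..] row ]
---   | (m, row) <- zip [0 ..] input ]
---
--- -- matrixSetDiagModel [[1,2],[3,4]] [9,8] == [[9,2],[3,8]]
--- ```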
-
--- | Returns a batched diagonal tensor with given batched diagonal values.
---
--- Given a `diagonal`, this operation returns a tensor with the `diagonal` and
--- everything else padded with zeros. The diagonal is computed as follows:
--- 
--- Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
--- tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
--- 
--- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
--- 
--- and diagonal.shape = (2, 4)
--- 
--- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
---                                      [0, 2, 0, 0]
---                                      [0, 0, 3, 0]
---                                      [0, 0, 0, 4]],
---                                     [[5, 0, 0, 0]
---                                      [0, 6, 0, 0]
---                                      [0, 0, 7, 0]
---                                      [0, 0, 0, 8]]]
--- 
--- which has shape (2, 4, 4)
--- ```
-matrixDiag :: forall v1 t . (TensorType t) =>
-              Tensor v1 t -- ^ __diagonal__: Rank `k`, where `k >= 1`.
-              -> Tensor Value t -- ^ __output__: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
-matrixDiag diagonal | eqLengthGuard [] =
-    buildOp (opDef "MatrixDiag"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        diagonal
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "Rank `k`, where `k >= 1`."
-  name: "diagonal"
-  type_attr: "T"
-}
-output_arg {
-  description: "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns a diagonal tensor with given diagonal values.
---
--- Given a `diagonal`, this operation returns a tensor with the `diagonal` and
--- everything else padded with zeros. The diagonal is computed as follows:
--- 
--- Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
--- rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
--- 
--- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 'diagonal' is [1, 2, 3, 4]
--- tf.diag(diagonal) ==> [[1, 0, 0, 0]
---                        [0, 2, 0, 0]
---                        [0, 0, 3, 0]
---                        [0, 0, 0, 4]]
--- ```
-diag :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Int.Int32, Data.Int.Int64,
-                                             Double, Float] t) =>
-        Tensor v1 t -- ^ __diagonal__: Rank k tensor where k is at most 3.
-        -> Tensor Value t -- ^ __output__
-diag diagonal | eqLengthGuard [] =
-    buildOp (opDef "Diag"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        diagonal
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Rank k tensor where k is at most 3."
-  name: "diagonal"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
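-
--- A rank-1 model of the rule above in standalone Haskell (illustration
--- only; `diagModel` is a hypothetical name, not the generated wrapper):
---
--- ```haskell
--- diagModel :: Num a => [a] -> [[a]]
--- diagModel ds =
---   [ [ if i == j then d else 0 | (j, _) <- zip [0 ..] ds ]
---   | (i, d) <- zip [0 ..] ds ]
---
--- -- diagModel [1,2,3,4]
--- --   == [[1,0,0,0],[0,2,0,0],[0,0,3,0],[0,0,0,4]]
--- ```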
-
--- | Returns an immutable tensor from a memory region.
---
--- The current implementation memmaps the tensor from a file.
-immutableConst :: forall dtype . (TensorType dtype) =>
-                  Shape -- ^ __shape__: Shape of the returned tensor.
-                  -> Tensor Value dtype -- ^ __tensor__
-immutableConst shape | eqLengthGuard [] =
-    buildOp (opDef "ImmutableConst"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "shape" .~ shape)
-        
-{-
-attr {
-  description: "Type of the returned tensor."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  description: "Shape of the returned tensor."
-  name: "shape"
-  type: "shape"
-}
-attr {
-  description: "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env."
-  name: "memory_region_name"
-  type: "string"
-}
-output_arg { name: "tensor" type_attr: "dtype" }
--}
-
--- | Concatenates tensors along one dimension.
-
-concat :: forall v1 v2 t . (TensorType t) =>
-          Tensor v1 Data.Int.Int32 -- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the
-                                   -- range [0, rank(values)).
-          -> [Tensor v2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,
-                           -- and their sizes must match in all dimensions except `concat_dim`.
-          -> Tensor Value t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the
-          -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
-          -- in `concat_dim` where it has the sum of the sizes.
-concat concat_dim values | eqLengthGuard [("N", [("values", length values)])] =
-    buildOp (opDef "Concat"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        concat_dim values
-  where
-    n = fromIntegral (length values) :: Int64
-{-
-attr { has_minimum: true minimum: 2 name: "N" type: "int" }
-attr { name: "T" type: "type" }
-input_arg {
-  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
-  name: "concat_dim"
-  type: DT_INT32
-}
-input_arg {
-  description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
-  name: "values"
-  number_attr: "N"
-  type_attr: "T"
-}
-output_arg {
-  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
-  name: "output"
-  type_attr: "T"
-}
--}
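-
--- A list-based model of concatenating 2-D tensors (standalone illustration
--- only, not the generated wrapper): along dimension 0 it is plain list
--- concatenation; along dimension 1 the rows are zipped together.
---
--- ```haskell
--- import Data.List (foldl1')
---
--- concatDim0, concatDim1 :: [[[a]]] -> [[a]]
--- concatDim0 = Prelude.concat
--- concatDim1 = foldl1' (zipWith (++))
---
--- -- concatDim1 [[[1,2],[3,4]], [[5],[6]]] == [[1,2,5],[3,4,6]]
--- ```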
-
--- | Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
---
--- Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
--- For example, given a tensor of shape `(A, B, C, D)`;
--- 
--- If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
---   and each tensor in `output` will have shape `(B, C, D)`. (Note that the
---   dimension unpacked along is gone, unlike `split`).
--- 
--- If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
---   and each tensor in `output` will have shape `(A, C, D)`.
--- Etc.
--- 
--- This is the opposite of `pack`.
-unpack :: forall v1 t . (TensorType t) => Data.Int.Int64 -- ^ __num__
-          -> Tensor v1 t -- ^ __value__: 1-D or higher, with `axis` dimension size equal to `num`.
-          -> [Tensor Value t] -- ^ __output__: The list of tensors unpacked from `value`.
-unpack num value | eqLengthGuard [] =
-    buildListOp [num] (opDef "Unpack"
-                       & opAttr "T" .~ tensorType (undefined :: t)
-                       & opAttr "num" .~ num)
-        value
-{-
-attr { has_minimum: true name: "num" type: "int" }
-attr { name: "T" type: "type" }
-attr {
-  default_value { i: 0 }
-  description: "Dimension along which to unpack.  Negative values wrap around, so the\nvalid range is `[-R, R)`."
-  name: "axis"
-  type: "int"
-}
-input_arg {
-  description: "1-D or higher, with `axis` dimension size equal to `num`."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "The list of tensors unpacked from `value`."
-  name: "output"
-  number_attr: "num"
-  type_attr: "T"
-}
--}
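-
--- In list form (standalone illustration only; names are hypothetical):
--- unpacking a 2-D tensor along axis 0 yields its rows, and along axis 1
--- its columns.
---
--- ```haskell
--- import Data.List (transpose)
---
--- unpackAxis0, unpackAxis1 :: [[a]] -> [[a]]
--- unpackAxis0 = id
--- unpackAxis1 = transpose
---
--- -- unpackAxis0 [[1,2,3],[4,5,6]] == [[1,2,3],[4,5,6]]
--- -- unpackAxis1 [[1,2,3],[4,5,6]] == [[1,4],[2,5],[3,6]]
--- ```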
-
--- | Output a fact about factorials.
-
-fact :: Tensor Value Data.ByteString.ByteString -- ^ __fact__
-fact  | eqLengthGuard [] =
-    buildOp (opDef "Fact")
-        
-{-
-output_arg { name: "fact" type: DT_STRING }
--}
-
--- | Computes the absolute value of a tensor.
---
--- Given a tensor `x`, this operation returns a tensor containing the absolute
--- value of each element in `x`. For example, if x is an input element and y is
--- an output element, this operation computes \\(y = |x|\\).
-abs :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64,
-                                            Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-abs x | eqLengthGuard [] =
-    buildOp (opDef "Abs"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Computes softmax activations.
---
--- For each batch `i` and class `j` we have
--- 
---     softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
-softmax :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                Float] t) =>
-           Tensor v1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`.
-           -> Tensor Value t -- ^ __softmax__: Same shape as `logits`.
-softmax logits | eqLengthGuard [] =
-    buildOp (opDef "Softmax"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        logits
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D with shape `[batch_size, num_classes]`."
-  name: "logits"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same shape as `logits`."
-  name: "softmax"
-  type_attr: "T"
-}
--}
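-
--- The formula for one row, in standalone Haskell (illustration only;
--- `softmaxRow` is a hypothetical helper, and a production kernel would
--- typically subtract the row maximum first for numerical stability):
---
--- ```haskell
--- softmaxRow :: [Double] -> [Double]
--- softmaxRow xs = map (/ total) es
---   where
---     es    = map exp xs
---     total = sum es
---
--- -- softmaxRow [1, 2, 3] ~ [0.0900, 0.2447, 0.6652]
--- ```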
-
--- | Reverses specific dimensions of a tensor.
---
--- Given a `tensor` and an `int32` tensor `axis` representing the set of
--- dimensions of `tensor` to reverse, this operation reverses each dimension
--- `i` for which there exists `j` such that `axis[j] == i`.
--- 
--- `tensor` can have up to 8 dimensions. `axis` may contain 0 or more
--- entries. If an index is specified more than once, an InvalidArgument
--- error is raised.
--- 
--- For example:
--- 
--- ```prettyprint
--- # tensor 't' is [[[[ 0,  1,  2,  3],
--- #                  [ 4,  5,  6,  7],
--- #                  [ 8,  9, 10, 11]],
--- #                 [[12, 13, 14, 15],
--- #                  [16, 17, 18, 19],
--- #                  [20, 21, 22, 23]]]]
--- # tensor 't' shape is [1, 2, 3, 4]
--- 
--- # 'dims' is '[3]' (or 'dims' is '[-1]')
--- reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
---                         [ 7,  6,  5,  4],
---                         [11, 10,  9,  8]],
---                        [[15, 14, 13, 12],
---                         [19, 18, 17, 16],
---                         [23, 22, 21, 20]]]]
--- 
--- # 'dims' is '[1]' (or 'dims' is '[-3]')
--- reverse(t, dims) ==> [[[[12, 13, 14, 15],
---                         [16, 17, 18, 19],
---                         [20, 21, 22, 23]],
---                        [[ 0,  1,  2,  3],
---                         [ 4,  5,  6,  7],
---                         [ 8,  9, 10, 11]]]]
--- 
--- # 'dims' is '[2]' (or 'dims' is '[-2]')
--- reverse(t, dims) ==> [[[[8, 9, 10, 11],
---                         [4, 5, 6, 7],
---                         [0, 1, 2, 3]],
---                        [[20, 21, 22, 23],
---                         [16, 17, 18, 19],
---                         [12, 13, 14, 15]]]]
--- ```
-reverseV2 :: forall v1 v2 tidx t . (TensorType tidx, OneOf '[Data.Int.Int32,
-                                                             Data.Int.Int64] tidx,
-                                    TensorType t,
-                                    OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float), Bool,
-                                            Data.Int.Int32, Data.Int.Int64,
-                                            Data.Int.Int8, Data.Word.Word16,
-                                            Data.Word.Word8, Double,
-                                            Float] t) =>
-             Tensor v1 t -- ^ __tensor__: Up to 8-D.
-             -> Tensor v2 tidx -- ^ __axis__: 1-D. The indices of the dimensions to reverse.
-             -> Tensor Value t -- ^ __output__: The same shape as `tensor`.
-reverseV2 tensor axis | eqLengthGuard [] =
-    buildOp (opDef "ReverseV2"
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        tensor axis
-{-
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_BOOL
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Up to 8-D." name: "tensor" type_attr: "T"
-}
-input_arg {
-  description: "1-D. The indices of the dimensions to reverse."
-  name: "axis"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The same shape as `tensor`."
-  name: "output"
-  type_attr: "T"
-}
--}
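-
--- In list form for the 2-D case (standalone illustration only; names are
--- hypothetical): reversing dimension 0 reverses the list of rows, and
--- reversing dimension 1 reverses each row.
---
--- ```haskell
--- reverseDim0, reverseDim1 :: [[a]] -> [[a]]
--- reverseDim0 = Prelude.reverse
--- reverseDim1 = map Prelude.reverse
---
--- -- reverseDim0 [[1,2],[3,4]] == [[3,4],[1,2]]
--- -- reverseDim1 [[1,2],[3,4]] == [[2,1],[4,3]]
--- ```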
-
--- | Return a tensor with the same shape and contents as the input tensor or value.
-
-identity :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __input__
-            -> Tensor Value t -- ^ __output__
-identity input | eqLengthGuard [] =
-    buildOp (opDef "Identity"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Adds two `SparseTensor` objects to produce another `SparseTensor`.
---
--- The input `SparseTensor` objects' indices are assumed ordered in standard
--- lexicographic order.  If this is not the case, before this step run
--- `SparseReorder` to restore index ordering.
--- 
--- By default, if two values sum to zero at some index, the output `SparseTensor`
--- would still include that particular location in its index, storing a zero in the
--- corresponding value slot.  To override this, callers can specify `thresh`,
--- indicating that if the sum has a magnitude strictly smaller than `thresh`, its
--- corresponding value and index would then not be included.  In particular,
--- `thresh == 0` (default) means everything is kept and actual thresholding happens
--- only for a positive value.
--- 
--- In the following shapes, `nnz` is the count after taking `thresh` into account.
-sparseAdd :: forall v1 v2 v3 v4 v5 v6 v7 t treal . (TensorType t,
-                                                    OneOf '[(Data.Complex.Complex Double),
-                                                            (Data.Complex.Complex Float),
-                                                            Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Data.Int.Int8,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8,
-                                                            Double, Float] t,
-                                                    TensorType treal,
-                                                    OneOf '[Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Data.Int.Int8,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8,
-                                                            Double,
-                                                            Float] treal) =>
-             Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
-             -> Tensor v2 t -- ^ __a_values__: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
-             -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
-             -> Tensor v4 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
-             -> Tensor v5 t -- ^ __b_values__: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
-             -> Tensor v6 Data.Int.Int64 -- ^ __b_shape__: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
-             -> Tensor v7 treal -- ^ __thresh__: 0-D.  The magnitude threshold that determines if an output value/index
-                                -- pair takes space.
-             -> (Tensor Value Data.Int.Int64, Tensor Value t,
-                 Tensor Value Data.Int.Int64)
-             -- ^ (__sum_indices__, __sum_values__, __sum_shape__)
-             --
-             -- * __sum_indices__
-             --
-             -- * __sum_values__
-             --
-             -- * __sum_shape__
-sparseAdd a_indices a_values a_shape b_indices b_values b_shape
-          thresh | eqLengthGuard [] =
-    buildOp (opDef "SparseAdd"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Treal" .~ tensorType (undefined :: treal))
-        a_indices a_values a_shape b_indices b_values b_shape thresh
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "Treal"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix."
-  name: "a_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector."
-  name: "a_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector."
-  name: "a_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix."
-  name: "b_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector."
-  name: "b_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector."
-  name: "b_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "0-D.  The magnitude threshold that determines if an output value/index\npair takes space."
-  name: "thresh"
-  type_attr: "Treal"
-}
-output_arg { name: "sum_indices" type: DT_INT64 }
-output_arg { name: "sum_values" type_attr: "T" }
-output_arg { name: "sum_shape" type: DT_INT64 }
--}
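-
--- The `thresh` rule in isolation, as standalone Haskell (illustration only;
--- `applyThresh` is a hypothetical helper): after summing, an (index, value)
--- pair is kept only when the value's magnitude is at least `thresh`, so
--- `thresh == 0` keeps everything.
---
--- ```haskell
--- applyThresh :: (Num a, Ord a) => a -> [([Int], a)] -> [([Int], a)]
--- applyThresh thresh = filter (\(_, v) -> abs v >= thresh)
---
--- -- applyThresh 0.5 [([0,0], 0.2), ([0,1], -1.0)] == [([0,1], -1.0)]
--- ```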
-
--- | Update '*var' according to the centered RMSProp algorithm.
---
--- The centered RMSProp algorithm uses an estimate of the centered second moment
--- (i.e., the variance) for normalization, as opposed to regular RMSProp, which
--- uses the (uncentered) second moment. This often helps with training, but is
--- slightly more expensive in terms of computation and memory.
--- 
--- Note that in the dense implementation of this algorithm, mg, ms, and mom will
--- update even if the grad is zero, but in this sparse implementation, mg, ms,
--- and mom will not update in iterations during which the grad is zero.
--- 
--- mean_square = decay * mean_square + (1-decay) * gradient ** 2
--- mean_grad = decay * mean_grad + (1-decay) * gradient
--- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
--- 
--- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
--- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
--- var <- var - mom
-sparseApplyCenteredRMSProp :: forall v5 v6 v7 v8 v9 v10 t
-                              tindices . (TensorType t,
-                                          OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t, TensorType tindices,
-                                          OneOf '[Data.Int.Int32,
-                                                  Data.Int.Int64] tindices) =>
-                              Tensor Ref t -- ^ __var__: Should be from a Variable().
-                              -> Tensor Ref t -- ^ __mg__: Should be from a Variable().
-                              -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
-                              -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
-                              -> Tensor v5 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                              -> Tensor v6 t -- ^ __rho__: Decay rate. Must be a scalar.
-                              -> Tensor v7 t -- ^ __momentum__
-                              -> Tensor v8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
-                              -> Tensor v9 t -- ^ __grad__: The gradient.
-                              -> Tensor v10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
-                              -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyCenteredRMSProp var mg ms mom lr rho momentum epsilon grad
-                           indices | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyCenteredRMSProp"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var mg ms mom lr rho momentum epsilon grad indices
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "mg"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "ms"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "mom"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "Decay rate. Must be a scalar."
-  name: "rho"
-  type_attr: "T"
-}
-input_arg { name: "momentum" type_attr: "T" }
-input_arg {
-  description: "Ridge term. Must be a scalar."
-  name: "epsilon"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var, ms and mom."
-  name: "indices"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Add all input tensors element-wise.
-
-addN :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Int.Int16, Data.Int.Int32,
-                                             Data.Int.Int64, Data.Int.Int8,
-                                             Data.Word.Word16, Data.Word.Word8,
-                                             Double, Float] t) =>
-        [Tensor v1 t] -- ^ __inputs__: Must all be the same size and shape.
-        -> Tensor Value t -- ^ __sum__
-addN inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
-    buildOp (opDef "AddN"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        inputs
-  where
-    n = fromIntegral (length inputs) :: Int64
-{-
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Must all be the same size and shape."
-  name: "inputs"
-  number_attr: "N"
-  type_attr: "T"
-}
-output_arg { name: "sum" type_attr: "T" }
--}
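-
--- A list model of the element-wise sum (standalone illustration only;
--- `addNModel` is a hypothetical name):
---
--- ```haskell
--- import Data.List (foldl1')
---
--- addNModel :: Num a => [[a]] -> [a]
--- addNModel = foldl1' (zipWith (+))
---
--- -- addNModel [[1,2],[10,20],[100,200]] == [111,222]
--- ```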
-
--- | Computes offsets of concat inputs within the concatenated output.
---
--- For example:
--- 
--- ```prettyprint
--- # 'x' is [2, 2, 7]
--- # 'y' is [2, 3, 7]
--- # 'z' is [2, 5, 7]
--- concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
--- ```
-concatOffset :: Tensor v1 Data.Int.Int32 -- ^ __concat_dim__: The dimension along which to concatenate.
-                -> [Tensor v2 Data.Int.Int32] -- ^ __shape__: The `N` int32 vectors representing shape of tensors being concatenated.
-                -> [Tensor Value Data.Int.Int32] -- ^ __offset__: The `N` int32 vectors representing the starting offset
-                --         of input tensors within the concatenated output.
-                -- 
-                -- This is typically used by gradient computations for a concat operation.
-concatOffset concat_dim
-             shape | eqLengthGuard [("N", [("shape", length shape)])] =
-    buildListOp [n] (opDef "ConcatOffset"
-                     & opAttr "N" .~ n)
-        concat_dim shape
-  where
-    n = fromIntegral (length shape) :: Int64
-{-
-attr { has_minimum: true minimum: 2 name: "N" type: "int" }
-input_arg {
-  description: "The dimension along which to concatenate."
-  name: "concat_dim"
-  type: DT_INT32
-}
-input_arg {
-  description: "The `N` int32 vectors representing shape of tensors being concatenated."
-  name: "shape"
-  number_attr: "N"
-  type: DT_INT32
-}
-output_arg {
-  description: "The `N` int32 vectors representing the starting offset\n        of input tensors within the concatenated output.\n\nThis is typically used by gradient computations for a concat operation."
-  name: "offset"
-  number_attr: "N"
-  type: DT_INT32
-}
--}
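-
--- A shape-level model in standalone Haskell (illustration only;
--- `concatOffsetModel` is a hypothetical name): each offset is the running
--- sum of the preceding sizes along `concat_dim`, and zero elsewhere.
---
--- ```haskell
--- concatOffsetModel :: Int -> [[Int]] -> [[Int]]
--- concatOffsetModel dim = go 0
---   where
---     go _ [] = []
---     go acc (s : ss) =
---       [ if d == dim then acc else 0 | (d, _) <- zip [0 ..] s ]
---         : go (acc + s !! dim) ss
---
--- -- concatOffsetModel 1 [[2,2,7],[2,3,7],[2,5,7]]
--- --   == [[0,0,0],[0,2,0],[0,5,0]]
--- ```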
-
--- | Concatenates tensors along one dimension.
-
-concatV2 :: forall v1 v2 t tidx . (TensorType t, TensorType tidx,
-                                   OneOf '[Data.Int.Int32,
-                                           Data.Int.Int64] tidx) =>
-            [Tensor v1 t] -- ^ __values__: List of `N` Tensors to concatenate. Their ranks and types must match,
-                          -- and their sizes must match in all dimensions except `concat_dim`.
-            -> Tensor v2 tidx -- ^ __axis__: 0-D.  The dimension along which to concatenate.  Must be in the
-                              -- range [0, rank(values)).
-            -> Tensor Value t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the
-            -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
-            -- in `concat_dim` where it has the sum of the sizes.
-concatV2 values axis | eqLengthGuard [("N", [("values", length values)])] =
-    buildOp (opDef "ConcatV2"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx)
-             & opAttr "N" .~ n)
-        values axis
-  where
-    n = fromIntegral (length values) :: Int64
-{-
-attr { has_minimum: true minimum: 2 name: "N" type: "int" }
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "List of `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
-  name: "values"
-  number_attr: "N"
-  type_attr: "T"
-}
-input_arg {
-  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
-  name: "axis"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns a tensor of zeros with the same shape and type as x.
-
-zerosLike :: forall v1 t . (TensorType t) =>
-             Tensor v1 t -- ^ __x__: a tensor of type T.
-             -> Tensor Value t -- ^ __y__: a tensor of the same shape and type as x but filled with zeros.
-zerosLike x | eqLengthGuard [] =
-    buildOp (opDef "ZerosLike"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "a tensor of type T." name: "x" type_attr: "T"
-}
-output_arg {
-  description: "a tensor of the same shape and type as x but filled with zeros."
-  name: "y"
-  type_attr: "T"
-}
--}
-
--- | Update '*var' according to the centered RMSProp algorithm.
---
--- The centered RMSProp algorithm uses an estimate of the centered second moment
--- (i.e., the variance) for normalization, as opposed to regular RMSProp, which
--- uses the (uncentered) second moment. This often helps with training, but is
--- slightly more expensive in terms of computation and memory.
--- 
--- Note that in the dense implementation of this algorithm, mg, ms, and mom will
--- update even if the grad is zero, but in this sparse implementation, mg, ms,
--- and mom will not update in iterations during which the grad is zero.
--- 
--- mean_square = decay * mean_square + (1-decay) * gradient ** 2
--- mean_grad = decay * mean_grad + (1-decay) * gradient
--- 
--- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
--- 
--- mg <- rho * mg_{t-1} + (1-rho) * grad
--- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
--- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
--- var <- var - mom
-applyCenteredRMSProp :: forall v5 v6 v7 v8 v9 t . (TensorType t,
-                                                   OneOf '[(Data.Complex.Complex Double),
-                                                           (Data.Complex.Complex Float),
-                                                           Data.Int.Int16,
-                                                           Data.Int.Int32,
-                                                           Data.Int.Int64,
-                                                           Data.Int.Int8,
-                                                           Data.Word.Word16,
-                                                           Data.Word.Word8,
-                                                           Double, Float] t) =>
-                        Tensor Ref t -- ^ __var__: Should be from a Variable().
-                        -> Tensor Ref t -- ^ __mg__: Should be from a Variable().
-                        -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
-                        -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
-                        -> Tensor v5 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                        -> Tensor v6 t -- ^ __rho__: Decay rate. Must be a scalar.
-                        -> Tensor v7 t -- ^ __momentum__
-                        -> Tensor v8 t -- ^ __epsilon__: Ridge term. Must be a scalar.
-                        -> Tensor v9 t -- ^ __grad__: The gradient.
-                        -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyCenteredRMSProp var mg ms mom lr rho momentum epsilon
-                     grad | eqLengthGuard [] =
-    buildOp (opDef "ApplyCenteredRMSProp"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var mg ms mom lr rho momentum epsilon grad
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "mg"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "ms"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "mom"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "Decay rate. Must be a scalar."
-  name: "rho"
-  type_attr: "T"
-}
-input_arg { name: "momentum" type_attr: "T" }
-input_arg {
-  description: "Ridge term. Must be a scalar."
-  name: "epsilon"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Update '*var' according to the RMSProp algorithm.
---
--- Note that in the dense implementation of this algorithm, ms and mom will
--- update even if the grad is zero, but in this sparse implementation, ms
--- and mom will not update in iterations during which the grad is zero.
--- 
--- mean_square = decay * mean_square + (1-decay) * gradient ** 2
--- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
--- 
--- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
--- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
--- var <- var - mom
-applyRMSProp :: forall v4 v5 v6 v7 v8 t . (TensorType t,
-                                           OneOf '[(Data.Complex.Complex Double),
-                                                   (Data.Complex.Complex Float),
-                                                   Data.Int.Int16,
-                                                   Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Int.Int8,
-                                                   Data.Word.Word16,
-                                                   Data.Word.Word8, Double,
-                                                   Float] t) =>
-                Tensor Ref t -- ^ __var__: Should be from a Variable().
-                -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
-                -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
-                -> Tensor v4 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                -> Tensor v5 t -- ^ __rho__: Decay rate. Must be a scalar.
-                -> Tensor v6 t -- ^ __momentum__
-                -> Tensor v7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
-                -> Tensor v8 t -- ^ __grad__: The gradient.
-                -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyRMSProp var ms mom lr rho momentum epsilon grad | eqLengthGuard [] =
-    buildOp (opDef "ApplyRMSProp"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var ms mom lr rho momentum epsilon grad
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "ms"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "mom"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "Decay rate. Must be a scalar."
-  name: "rho"
-  type_attr: "T"
-}
-input_arg { name: "momentum" type_attr: "T" }
-input_arg {
-  description: "Ridge term. Must be a scalar."
-  name: "epsilon"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
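-
--- A minimal usage sketch for 'applyRMSProp' (illustrative only; it assumes
--- 'initializedVariable', 'zeros', and 'scalar' from TensorFlow.Ops, 'Shape'
--- from TensorFlow.Types, and a variable of shape [10]; the hyperparameter
--- values are arbitrary):
---
--- > rmsPropStep :: Tensor Ref Float -> Tensor v Float -> Build (Tensor Ref Float)
--- > rmsPropStep var grad = do
--- >     ms  <- initializedVariable (zeros (Shape [10]))
--- >     mom <- initializedVariable (zeros (Shape [10]))
--- >     applyRMSProp var ms mom (scalar 0.001) (scalar 0.9)
--- >                  (scalar 0.0) (scalar 1e-10) grad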
-
--- | Adds a value to the current value of a variable.
---
--- Any ReadVariableOp which depends directly or indirectly on this assign is
--- guaranteed to see the incremented value or a subsequent newer one.
--- 
--- Outputs the incremented value, which can be used to totally order the
--- increments to this variable.
-assignAddVariableOp :: forall v2 dtype . (TensorType dtype) =>
-                       ResourceHandle dtype -- ^ __resource__: handle to the resource in which to store the variable.
-                       -> Tensor v2 dtype -- ^ __value__: the value by which the variable will be incremented.
-                       -> Build (ControlNode)
-assignAddVariableOp resource value | eqLengthGuard [] =
-    buildOp (opDef "AssignAddVariableOp"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        resource value
-{-
-attr {
-  description: "the dtype of the value." name: "dtype" type: "type"
-}
-input_arg {
-  description: "handle to the resource in which to store the variable."
-  name: "resource"
-  type: DT_RESOURCE
-}
-input_arg {
-  description: "the value by which the variable will be incremented."
-  name: "value"
-  type_attr: "dtype"
-}
--}
-
--- | Update '*var' according to the Adam algorithm.
---
--- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
--- m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
--- v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
--- variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
-applyAdam :: forall v4 v5 v6 v7 v8 v9 v10 t . (TensorType t,
-                                               OneOf '[(Data.Complex.Complex Double),
-                                                       (Data.Complex.Complex Float),
-                                                       Data.Int.Int16,
-                                                       Data.Int.Int32,
-                                                       Data.Int.Int64,
-                                                       Data.Int.Int8,
-                                                       Data.Word.Word16,
-                                                       Data.Word.Word8, Double,
-                                                       Float] t) =>
-             Tensor Ref t -- ^ __var__: Should be from a Variable().
-             -> Tensor Ref t -- ^ __m__: Should be from a Variable().
-             -> Tensor Ref t -- ^ __v__: Should be from a Variable().
-             -> Tensor v4 t -- ^ __beta1_power__: Must be a scalar.
-             -> Tensor v5 t -- ^ __beta2_power__: Must be a scalar.
-             -> Tensor v6 t -- ^ __lr__: Scaling factor. Must be a scalar.
-             -> Tensor v7 t -- ^ __beta1__: Momentum factor. Must be a scalar.
-             -> Tensor v8 t -- ^ __beta2__: Momentum factor. Must be a scalar.
-             -> Tensor v9 t -- ^ __epsilon__: Ridge term. Must be a scalar.
-             -> Tensor v10 t -- ^ __grad__: The gradient.
-             -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyAdam var m v beta1_power beta2_power lr beta1 beta2 epsilon
-          grad | eqLengthGuard [] =
-    buildOp (opDef "ApplyAdam"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var m v beta1_power beta2_power lr beta1 beta2 epsilon grad
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "m"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "v"
-  type_attr: "T"
-}
-input_arg {
-  description: "Must be a scalar." name: "beta1_power" type_attr: "T"
-}
-input_arg {
-  description: "Must be a scalar." name: "beta2_power" type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "Momentum factor. Must be a scalar."
-  name: "beta1"
-  type_attr: "T"
-}
-input_arg {
-  description: "Momentum factor. Must be a scalar."
-  name: "beta2"
-  type_attr: "T"
-}
-input_arg {
-  description: "Ridge term. Must be a scalar."
-  name: "epsilon"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
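-
--- A minimal usage sketch for 'applyAdam' (illustrative only; assumes 'scalar'
--- from TensorFlow.Ops; in practice the beta1_power and beta2_power inputs
--- would be updated on every step rather than held constant):
---
--- > adamStep :: Tensor Ref Float -> Tensor Ref Float -> Tensor Ref Float
--- >          -> Tensor v Float -> Build (Tensor Ref Float)
--- > adamStep var m mv grad =
--- >     applyAdam var m mv (scalar 0.9) (scalar 0.999) (scalar 0.001)
--- >               (scalar 0.9) (scalar 0.999) (scalar 1e-8) grad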
-
--- | Extracts a glimpse from the input tensor.
---
--- Returns a set of windows called glimpses extracted at location
--- `offsets` from the input tensor. If the windows only partially
--- overlap the inputs, the non-overlapping areas will be filled with
--- random noise.
--- 
--- The result is a 4-D tensor of shape `[batch_size, glimpse_height,
--- glimpse_width, channels]`. The channels and batch dimensions are the
--- same as those of the input tensor. The height and width of the output
--- windows are specified in the `size` parameter.
--- 
--- The arguments `normalized` and `centered` control how the windows are built:
--- 
--- * If the coordinates are normalized but not centered, 0.0 and 1.0
---   correspond to the minimum and maximum of each height and width
---   dimension.
--- * If the coordinates are both normalized and centered, they range from
---   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
---   left corner, the lower right corner is located at (1.0, 1.0) and the
---   center is at (0, 0).
--- * If the coordinates are not normalized they are interpreted as
---   numbers of pixels.
-extractGlimpse :: Tensor v1 Float -- ^ __input__: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
-                  -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D tensor of 2 elements containing the size of the glimpses
-                                              -- to extract.  The glimpse height must be specified first, followed
-                                              -- by the glimpse width.
-                  -> Tensor v3 Float -- ^ __offsets__: A 2-D integer tensor of shape `[batch_size, 2]` containing
-                                     -- the x, y locations of the center of each window.
-                  -> Tensor Value Float -- ^ __glimpse__: A tensor representing the glimpses `[batch_size,
-                  -- glimpse_height, glimpse_width, channels]`.
-extractGlimpse input size offsets | eqLengthGuard [] =
-    buildOp (opDef "ExtractGlimpse")
-        input size offsets
-{-
-attr {
-  default_value { b: true }
-  description: "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images."
-  name: "centered"
-  type: "bool"
-}
-attr {
-  default_value { b: true }
-  description: "indicates if the offset coordinates are normalized."
-  name: "normalized"
-  type: "bool"
-}
-attr {
-  default_value { b: true }
-  description: "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution."
-  name: "uniform_noise"
-  type: "bool"
-}
-input_arg {
-  description: "A 4-D float tensor of shape `[batch_size, height, width, channels]`."
-  name: "input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract.  The glimpse height must be specified first, following\nby the glimpse width."
-  name: "size"
-  type: DT_INT32
-}
-input_arg {
-  description: "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe x, y locations of the center of each window."
-  name: "offsets"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`."
-  name: "glimpse"
-  type: DT_FLOAT
-}
--}
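-
--- For example (a sketch; assumes 'vector' from TensorFlow.Ops and suitably
--- shaped 'input' and 'offsets' tensors), extracting 64x64 windows:
---
--- > glimpses = extractGlimpse input (vector [64, 64 :: Data.Int.Int32]) offsets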
-
--- | Update relevant entries in '*var' and '*accum' according to the momentum scheme.
---
--- Set use_nesterov = True if you want to use Nesterov momentum.
--- 
--- That is, for rows for which we have grad, we update var and accum as follows:
--- 
--- accum = accum * momentum + grad
--- var -= lr * accum
-sparseApplyMomentum :: forall v3 v4 v5 v6 t tindices . (TensorType t,
-                                                        OneOf '[(Data.Complex.Complex Double),
-                                                                (Data.Complex.Complex Float),
-                                                                Data.Int.Int16,
-                                                                Data.Int.Int32,
-                                                                Data.Int.Int64,
-                                                                Data.Int.Int8,
-                                                                Data.Word.Word16,
-                                                                Data.Word.Word8,
-                                                                Double,
-                                                                Float] t,
-                                                        TensorType tindices,
-                                                        OneOf '[Data.Int.Int32,
-                                                                Data.Int.Int64] tindices) =>
-                       Tensor Ref t -- ^ __var__: Should be from a Variable().
-                       -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                       -> Tensor v3 t -- ^ __lr__: Learning rate. Must be a scalar.
-                       -> Tensor v4 t -- ^ __grad__: The gradient.
-                       -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-                       -> Tensor v6 t -- ^ __momentum__: Momentum. Must be a scalar.
-                       -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyMomentum var accum lr grad indices momentum | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyMomentum"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var accum lr grad indices momentum
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
-  name: "use_nesterov"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Learning rate. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var and accum."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "Momentum. Must be a scalar."
-  name: "momentum"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Update '*var' according to the momentum scheme.
---
--- Set use_nesterov = True if you want to use Nesterov momentum.
--- 
--- accum = accum * momentum + grad
--- var -= lr * accum
-applyMomentum :: forall v3 v4 v5 t . (TensorType t,
-                                      OneOf '[(Data.Complex.Complex Double),
-                                              (Data.Complex.Complex Float),
-                                              Data.Int.Int16, Data.Int.Int32,
-                                              Data.Int.Int64, Data.Int.Int8,
-                                              Data.Word.Word16, Data.Word.Word8,
-                                              Double, Float] t) =>
-                 Tensor Ref t -- ^ __var__: Should be from a Variable().
-                 -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                 -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                 -> Tensor v4 t -- ^ __grad__: The gradient.
-                 -> Tensor v5 t -- ^ __momentum__: Momentum. Must be a scalar.
-                 -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyMomentum var accum lr grad momentum | eqLengthGuard [] =
-    buildOp (opDef "ApplyMomentum"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var accum lr grad momentum
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
-  name: "use_nesterov"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "Momentum. Must be a scalar."
-  name: "momentum"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
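-
--- A minimal usage sketch (illustrative only; assumes 'scalar' from
--- TensorFlow.Ops; 0.01 and 0.9 stand in for a learning rate and a momentum
--- coefficient):
---
--- > momentumStep var accum grad =
--- >     applyMomentum var accum (scalar 0.01) grad (scalar 0.9)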
-
--- | A queue that produces elements in first-in first-out order.
-
-fIFOQueue :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
-fIFOQueue  | eqLengthGuard [] =
-    buildOp (opDef "FIFOQueue")
-        
-{-
-attr {
-  description: "The type of each component in a value."
-  has_minimum: true
-  minimum: 1
-  name: "component_types"
-  type: "list(type)"
-}
-attr {
-  default_value { list { } }
-  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
-  has_minimum: true
-  name: "shapes"
-  type: "list(shape)"
-}
-attr {
-  default_value { i: -1 }
-  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
-  name: "capacity"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to the queue."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Update relevant entries in '*var' according to the Ftrl-proximal scheme.
---
--- That is, for rows for which we have grad, we update var, accum, and linear as follows:
--- accum_new = accum + grad * grad
--- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
--- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
--- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
--- accum = accum_new
-sparseApplyFtrl :: forall v4 v5 v6 v7 v8 v9 t tindices . (TensorType t,
-                                                          OneOf '[(Data.Complex.Complex Double),
-                                                                  (Data.Complex.Complex Float),
-                                                                  Data.Int.Int16,
-                                                                  Data.Int.Int32,
-                                                                  Data.Int.Int64,
-                                                                  Data.Int.Int8,
-                                                                  Data.Word.Word16,
-                                                                  Data.Word.Word8,
-                                                                  Double,
-                                                                  Float] t,
-                                                          TensorType tindices,
-                                                          OneOf '[Data.Int.Int32,
-                                                                  Data.Int.Int64] tindices) =>
-                   Tensor Ref t -- ^ __var__: Should be from a Variable().
-                   -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                   -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
-                   -> Tensor v4 t -- ^ __grad__: The gradient.
-                   -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-                   -> Tensor v6 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                   -> Tensor v7 t -- ^ __l1__: L1 regularization. Must be a scalar.
-                   -> Tensor v8 t -- ^ __l2__: L2 regularization. Must be a scalar.
-                   -> Tensor v9 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
-                   -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyFtrl var accum linear grad indices lr l1 l2
-                lr_power | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyFtrl"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var accum linear grad indices lr l1 l2 lr_power
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "linear"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var and accum."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regularization. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regularization. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr_power"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
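-
--- A minimal usage sketch (illustrative only; assumes 'scalar' and 'vector'
--- from TensorFlow.Ops; the indices and regularization constants are
--- placeholders):
---
--- > ftrlStep var accum linear grad =
--- >     sparseApplyFtrl var accum linear grad
--- >                     (vector [0, 2 :: Data.Int.Int32])  -- rows to update
--- >                     (scalar 0.01) (scalar 0.0) (scalar 0.0) (scalar (-0.5))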
-
--- | Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
-
-sparseApplyAdagradDA :: forall v4 v5 v6 v7 v8 v9 t tindices . (TensorType t,
-                                                               OneOf '[(Data.Complex.Complex Double),
-                                                                       (Data.Complex.Complex Float),
-                                                                       Data.Int.Int16,
-                                                                       Data.Int.Int32,
-                                                                       Data.Int.Int64,
-                                                                       Data.Int.Int8,
-                                                                       Data.Word.Word16,
-                                                                       Data.Word.Word8,
-                                                                       Double,
-                                                                       Float] t,
-                                                               TensorType tindices,
-                                                               OneOf '[Data.Int.Int32,
-                                                                       Data.Int.Int64] tindices) =>
-                        Tensor Ref t -- ^ __var__: Should be from a Variable().
-                        -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable().
-                        -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
-                        -> Tensor v4 t -- ^ __grad__: The gradient.
-                        -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-                        -> Tensor v6 t -- ^ __lr__: Learning rate. Must be a scalar.
-                        -> Tensor v7 t -- ^ __l1__: L1 regularization. Must be a scalar.
-                        -> Tensor v8 t -- ^ __l2__: L2 regularization. Must be a scalar.
-                        -> Tensor v9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
-                        -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyAdagradDA var gradient_accumulator gradient_squared_accumulator grad
-                     indices lr l1 l2 global_step | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyAdagradDA"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var gradient_accumulator gradient_squared_accumulator grad indices lr l1
-        l2 global_step
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "gradient_accumulator"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "gradient_squared_accumulator"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var and accum."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "Learning rate. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regularization. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regularization. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "Training step number. Must be a scalar."
-  name: "global_step"
-  type: DT_INT64
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Returns x // y element-wise.
---
--- *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-floorDiv :: forall v1 v2 t . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float),
-                                      Data.Int.Int16, Data.Int.Int32,
-                                      Data.Int.Int64, Data.Int.Int8,
-                                      Data.Word.Word16, Data.Word.Word8, Double,
-                                      Float] t) => Tensor v1 t -- ^ __x__
-            -> Tensor v2 t -- ^ __y__
-            -> Tensor Value t -- ^ __z__
-floorDiv x y | eqLengthGuard [] =
-    buildOp (opDef "FloorDiv"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
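-
--- For example (a sketch; assumes 'vector' and 'scalar' from TensorFlow.Ops),
--- broadcasting the divisor over a vector; the result rounds toward negative
--- infinity, so 7 // 2 = 3 and (-7) // 2 = -4:
---
--- > zs = floorDiv (vector [7, -7 :: Data.Int.Int32]) (scalar 2)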
-
--- | Update '*var' according to the proximal adagrad scheme.
-
-applyAdagradDA :: forall v4 v5 v6 v7 v8 t . (TensorType t,
-                                             OneOf '[(Data.Complex.Complex Double),
-                                                     (Data.Complex.Complex Float),
-                                                     Data.Int.Int16,
-                                                     Data.Int.Int32,
-                                                     Data.Int.Int64,
-                                                     Data.Int.Int8,
-                                                     Data.Word.Word16,
-                                                     Data.Word.Word8, Double,
-                                                     Float] t) =>
-                  Tensor Ref t -- ^ __var__: Should be from a Variable().
-                  -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable().
-                  -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable().
-                  -> Tensor v4 t -- ^ __grad__: The gradient.
-                  -> Tensor v5 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                  -> Tensor v6 t -- ^ __l1__: L1 regularization. Must be a scalar.
-                  -> Tensor v7 t -- ^ __l2__: L2 regularization. Must be a scalar.
-                  -> Tensor v8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar.
-                  -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyAdagradDA var gradient_accumulator gradient_squared_accumulator grad lr l1
-               l2 global_step | eqLengthGuard [] =
-    buildOp (opDef "ApplyAdagradDA"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var gradient_accumulator gradient_squared_accumulator grad lr l1 l2
-        global_step
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "gradient_accumulator"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "gradient_squared_accumulator"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regularization. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regularization. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "Training step number. Must be a scalar."
-  name: "global_step"
-  type: DT_INT64
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Update '*var' according to the adagrad scheme.
---
--- accum += grad * grad
--- var -= lr * grad * (1 / sqrt(accum))
-applyAdagrad :: forall v3 v4 t . (TensorType t,
-                                  OneOf '[(Data.Complex.Complex Double),
-                                          (Data.Complex.Complex Float),
-                                          Data.Int.Int16, Data.Int.Int32,
-                                          Data.Int.Int64, Data.Int.Int8,
-                                          Data.Word.Word16, Data.Word.Word8,
-                                          Double, Float] t) =>
-                Tensor Ref t -- ^ __var__: Should be from a Variable().
-                -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                -> Tensor v4 t -- ^ __grad__: The gradient.
-                -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyAdagrad var accum lr grad | eqLengthGuard [] =
-    buildOp (opDef "ApplyAdagrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var accum lr grad
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
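-
--- A minimal usage sketch (illustrative only; assumes 'initializedVariable',
--- 'zeros', and 'scalar' from TensorFlow.Ops and a variable of shape [10];
--- in practice the accumulator is usually initialized to a small positive
--- constant rather than zero):
---
--- > adagradStep var grad = do
--- >     accum <- initializedVariable (zeros (Shape [10]))
--- >     applyAdagrad var accum (scalar 0.01) grad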
-
--- | Computes the gradient of the sigmoid of `x` wrt its input.
---
--- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
--- `dy` is the corresponding input gradient.
-sigmoidGrad :: forall v1 v2 t . (TensorType t,
-                                 OneOf '[(Data.Complex.Complex Double),
-                                         (Data.Complex.Complex Float),
-                                         Data.Word.Word16, Double, Float] t) =>
-               Tensor v1 t -- ^ __x__
-               -> Tensor v2 t -- ^ __y__
-               -> Tensor Value t -- ^ __z__
-sigmoidGrad x y | eqLengthGuard [] =
-    buildOp (opDef "SigmoidGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Update '*var' according to the adadelta scheme.
---
--- accum = rho * accum + (1 - rho) * grad ** 2
--- update = sqrt(update_accum + epsilon) / sqrt(accum + epsilon) * grad
--- update_accum = rho * update_accum + (1 - rho) * update ** 2
--- var -= update
-applyAdadelta :: forall v4 v5 v6 v7 t . (TensorType t,
-                                         OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t) =>
-                 Tensor Ref t -- ^ __var__: Should be from a Variable().
-                 -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                 -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable().
-                 -> Tensor v4 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                 -> Tensor v5 t -- ^ __rho__: Decay factor. Must be a scalar.
-                 -> Tensor v6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
-                 -> Tensor v7 t -- ^ __grad__: The gradient.
-                 -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyAdadelta var accum accum_update lr rho epsilon grad | eqLengthGuard [] =
-    buildOp (opDef "ApplyAdadelta"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var accum accum_update lr rho epsilon grad
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum_update"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "Decay factor. Must be a scalar."
-  name: "rho"
-  type_attr: "T"
-}
-input_arg {
-  description: "Constant factor. Must be a scalar."
-  name: "epsilon"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
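-
--- A minimal usage sketch (illustrative only; assumes 'scalar' from
--- TensorFlow.Ops; lr = 1.0, rho = 0.95, and epsilon = 1e-6 are conventional
--- Adadelta defaults, not values mandated by this op):
---
--- > adadeltaStep var accum accumUpdate grad =
--- >     applyAdadelta var accum accumUpdate
--- >                   (scalar 1.0) (scalar 0.95) (scalar 1e-6) grad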
-
--- | Sparse update '*var' as FOBOS algorithm with fixed learning rate.
---
--- That is, for rows for which we have grad, we update var as follows:
--- prox_v = var - alpha * grad
--- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
-sparseApplyProximalGradientDescent :: forall v2 v3 v4 v5 v6 t
-                                      tindices . (TensorType t,
-                                                  OneOf '[(Data.Complex.Complex Double),
-                                                          (Data.Complex.Complex Float),
-                                                          Data.Int.Int16,
-                                                          Data.Int.Int32,
-                                                          Data.Int.Int64,
-                                                          Data.Int.Int8,
-                                                          Data.Word.Word16,
-                                                          Data.Word.Word8,
-                                                          Double, Float] t,
-                                                  TensorType tindices,
-                                                  OneOf '[Data.Int.Int32,
-                                                          Data.Int.Int64] tindices) =>
-                                      Tensor Ref t -- ^ __var__: Should be from a Variable().
-                                      -> Tensor v2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
-                                      -> Tensor v3 t -- ^ __l1__: L1 regularization. Must be a scalar.
-                                      -> Tensor v4 t -- ^ __l2__: L2 regularization. Must be a scalar.
-                                      -> Tensor v5 t -- ^ __grad__: The gradient.
-                                      -> Tensor v6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-                                      -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyProximalGradientDescent var alpha l1 l2 grad
-                                   indices | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyProximalGradientDescent"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var alpha l1 l2 grad indices
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "alpha"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regularization. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regularization. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var and accum."
-  name: "indices"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Update '*var' as FOBOS algorithm with fixed learning rate.
---
--- prox_v = var - alpha * delta
--- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
-applyProximalGradientDescent :: forall v2 v3 v4 v5 t . (TensorType t,
-                                                        OneOf '[(Data.Complex.Complex Double),
-                                                                (Data.Complex.Complex Float),
-                                                                Data.Int.Int16,
-                                                                Data.Int.Int32,
-                                                                Data.Int.Int64,
-                                                                Data.Int.Int8,
-                                                                Data.Word.Word16,
-                                                                Data.Word.Word8,
-                                                                Double,
-                                                                Float] t) =>
-                                Tensor Ref t -- ^ __var__: Should be from a Variable().
-                                -> Tensor v2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
-                                -> Tensor v3 t -- ^ __l1__: L1 regularization. Must be a scalar.
-                                -> Tensor v4 t -- ^ __l2__: L2 regularization. Must be a scalar.
-                                -> Tensor v5 t -- ^ __delta__: The change.
-                                -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyProximalGradientDescent var alpha l1 l2 delta | eqLengthGuard [] =
-    buildOp (opDef "ApplyProximalGradientDescent"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var alpha l1 l2 delta
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "alpha"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regularization. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regularization. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "The change." name: "delta" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
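-
--- A minimal usage sketch (illustrative only; assumes 'scalar' from
--- TensorFlow.Ops). Per the formulas above, setting l1 = l2 = 0 reduces this
--- op to plain gradient descent:
---
--- > proxStep var delta =
--- >     applyProximalGradientDescent var (scalar 0.01)
--- >                                  (scalar 0.001) (scalar 0.0) delta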
-
--- | Solves systems of linear equations.
---
--- `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
--- form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
--- a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
--- satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
--- If `adjoint` is `True` then each output matrix satisfies
--- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
-matrixSolve :: forall v1 v2 t . (TensorType t,
-                                 OneOf '[(Data.Complex.Complex Double),
-                                         (Data.Complex.Complex Float), Double,
-                                         Float] t) =>
-               Tensor v1 t -- ^ __matrix__: Shape is `[..., M, M]`.
-               -> Tensor v2 t -- ^ __rhs__: Shape is `[..., M, K]`.
-               -> Tensor Value t -- ^ __output__: Shape is `[..., M, K]`.
-matrixSolve matrix rhs | eqLengthGuard [] =
-    buildOp (opDef "MatrixSolve"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        matrix rhs
-{-
-attr {
-  default_value { b: false }
-  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint."
-  name: "adjoint"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_DOUBLE
-      type: DT_FLOAT
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape is `[..., M, M]`."
-  name: "matrix"
-  type_attr: "T"
-}
-input_arg {
-  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
-}
-output_arg {
-  description: "Shape is `[..., M, K]`."
-  name: "output"
-  type_attr: "T"
-}
--}
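-
--- For example (a sketch; 'a' and 'b' are assumed tensors of shape
--- `[..., M, M]` and `[..., M, K]`), solving a * x = b for each batch member:
---
--- > x = matrixSolve a b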
-
--- | Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
---
--- That is, for rows for which we have grad, we update var and accum as follows:
--- accum += grad * grad
--- prox_v = var
--- prox_v -= lr * grad * (1 / sqrt(accum))
--- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
-sparseApplyProximalAdagrad :: forall v3 v4 v5 v6 v7 t tindices . (TensorType t,
-                                                                  OneOf '[(Data.Complex.Complex Double),
-                                                                          (Data.Complex.Complex Float),
-                                                                          Data.Int.Int16,
-                                                                          Data.Int.Int32,
-                                                                          Data.Int.Int64,
-                                                                          Data.Int.Int8,
-                                                                          Data.Word.Word16,
-                                                                          Data.Word.Word8,
-                                                                          Double,
-                                                                          Float] t,
-                                                                  TensorType tindices,
-                                                                  OneOf '[Data.Int.Int32,
-                                                                          Data.Int.Int64] tindices) =>
-                              Tensor Ref t -- ^ __var__: Should be from a Variable().
-                              -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                              -> Tensor v3 t -- ^ __lr__: Learning rate. Must be a scalar.
-                              -> Tensor v4 t -- ^ __l1__: L1 regularization. Must be a scalar.
-                              -> Tensor v5 t -- ^ __l2__: L2 regularization. Must be a scalar.
-                              -> Tensor v6 t -- ^ __grad__: The gradient.
-                              -> Tensor v7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-                              -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyProximalAdagrad var accum lr l1 l2 grad indices | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyProximalAdagrad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var accum lr l1 l2 grad indices
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Learning rate. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regularization. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regularization. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var and accum."
-  name: "indices"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Update '*var' by subtracting 'alpha' * 'delta' from it.
-
-applyGradientDescent :: forall v2 v3 t . (TensorType t,
-                                          OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t) =>
-                        Tensor Ref t -- ^ __var__: Should be from a Variable().
-                        -> Tensor v2 t -- ^ __alpha__: Scaling factor. Must be a scalar.
-                        -> Tensor v3 t -- ^ __delta__: The change.
-                        -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyGradientDescent var alpha delta | eqLengthGuard [] =
-    buildOp (opDef "ApplyGradientDescent"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var alpha delta
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "alpha"
-  type_attr: "T"
-}
-input_arg {
-  description: "The change." name: "delta" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
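-
--- A minimal usage sketch (illustrative only; assumes 'scalar' from
--- TensorFlow.Ops), performing one gradient descent step,
--- var := var - 0.01 * delta:
---
--- > sgdStep var delta = applyGradientDescent var (scalar 0.01) delta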
-
--- | Batch normalization.
---
--- This op is deprecated. Prefer `tf.nn.batch_normalization`.
-batchNormWithGlobalNormalization :: forall v1 v2 v3 v4 v5 t . (TensorType t,
-                                                               OneOf '[(Data.Complex.Complex Double),
-                                                                       (Data.Complex.Complex Float),
-                                                                       Data.Int.Int16,
-                                                                       Data.Int.Int32,
-                                                                       Data.Int.Int64,
-                                                                       Data.Int.Int8,
-                                                                       Data.Word.Word16,
-                                                                       Data.Word.Word8,
-                                                                       Double,
-                                                                       Float] t) =>
-                                    Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulting tensor
-                                         -- needs to be multiplied with gamma.
-                                    -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0.
-                                    -> Tensor v1 t -- ^ __t__: A 4D input Tensor.
-                                    -> Tensor v2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t.
-                                                   -- This is the first output from tf.nn.moments,
-                                                   -- or a saved moving average thereof.
-                                    -> Tensor v3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t.
-                                                   -- This is the second output from tf.nn.moments,
-                                                   -- or a saved moving average thereof.
-                                    -> Tensor v4 t -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t.
-                                                   -- An offset to be added to the normalized tensor.
-                                    -> Tensor v5 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t.
-                                                   -- If "scale_after_normalization" is true, this tensor will be multiplied
-                                                   -- with the normalized tensor.
-                                    -> Tensor Value t -- ^ __result__
-batchNormWithGlobalNormalization scale_after_normalization variance_epsilon t m
-                                 v beta gamma | eqLengthGuard [] =
-    buildOp (opDef "BatchNormWithGlobalNormalization"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "scale_after_normalization" .~ scale_after_normalization
-             & opAttr "variance_epsilon" .~ variance_epsilon)
-        t m v beta gamma
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "A small float number to avoid dividing by 0."
-  name: "variance_epsilon"
-  type: "float"
-}
-attr {
-  description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
-  name: "scale_after_normalization"
-  type: "bool"
-}
-input_arg {
-  description: "A 4D input Tensor." name: "t" type_attr: "T"
-}
-input_arg {
-  description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
-  name: "m"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
-  name: "v"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
-  name: "beta"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
-  name: "gamma"
-  type_attr: "T"
-}
-output_arg { name: "result" type_attr: "T" }
--}
-
--- | Encode strings into web-safe base64 format.
---
--- Refer to the following article for more information on base64 format:
--- en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
--- end so that the encoded string has a length that is a multiple of 4. See the
--- Padding section of the link above.
--- 
--- Web-safe means that the encoder uses - and _ instead of + and /.
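---
--- A minimal usage sketch (not part of the generated API): it assumes the
--- session helpers from @TensorFlow.Core@ (qualified as @TF@), the @vector@
--- constructor from @TensorFlow.Ops@, and @OverloadedStrings@:
---
--- > -- TF.runSession / TF.run / TF.vector are assumed helpers
--- > encoded <- TF.runSession $
--- >     TF.run (encodeBase64 (TF.vector ["hello", "world"]))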
-encodeBase64 :: Tensor v1 Data.ByteString.ByteString -- ^ __input__: Strings to be encoded.
-                -> Tensor Value Data.ByteString.ByteString -- ^ __output__: Input strings encoded in base64.
-encodeBase64 input | eqLengthGuard [] =
-    buildOp (opDef "EncodeBase64")
-        input
-{-
-attr {
-  default_value { b: false }
-  description: "Bool whether padding is applied at the ends."
-  name: "pad"
-  type: "bool"
-}
-input_arg {
-  description: "Strings to be encoded." name: "input" type: DT_STRING
-}
-output_arg {
-  description: "Input strings encoded in base64."
-  name: "output"
-  type: DT_STRING
-}
--}
-
--- | Joins the strings in the given list of string tensors into one tensor;
---
--- with the given separator (default is an empty separator).
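---
--- For instance, joining two scalar strings (a sketch; @TF.scalar@ and the
--- session helpers are assumed from the companion @tensorflow-ops@ package,
--- and the unexposed @separator@ attr keeps its empty default):
---
--- > -- expected fetch: "foobar"
--- > joined <- TF.runSession $
--- >     TF.run (stringJoin [TF.scalar "foo", TF.scalar "bar"])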
-stringJoin :: [Tensor v1 Data.ByteString.ByteString] -- ^ __inputs__: A list of string tensors.  The tensors must all have the same shape,
-                                                     -- or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
-                                                     -- of non-scalar inputs.
-              -> Tensor Value Data.ByteString.ByteString -- ^ __output__
-stringJoin inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
-    buildOp (opDef "StringJoin"
-             & opAttr "N" .~ n)
-        inputs
-  where
-    n = fromIntegral (length inputs) :: Int64
-{-
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-attr {
-  default_value { s: "" }
-  description: "string, an optional join separator."
-  name: "separator"
-  type: "string"
-}
-input_arg {
-  description: "A list of string tensors.  The tensors must all have the same shape,\nor be scalars.  Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs."
-  name: "inputs"
-  number_attr: "N"
-  type: DT_STRING
-}
-output_arg { name: "output" type: DT_STRING }
--}
-
--- | Computes the gradient of the crop_and_resize op wrt the input image tensor.
-
-cropAndResizeGradImage :: forall v1 v2 v3 v4 t . (TensorType t,
-                                                  OneOf '[Data.Word.Word16,
-                                                          Double, Float] t) =>
-                          Tensor v1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
-                          -> Tensor v2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
-                                             -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
-                                             -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
-                                             -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
-                                             -- `[0, 1]` interval of normalized image height is mapped to
-                                             -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
-                                             -- which case the sampled crop is an up-down flipped version of the original
-                                             -- image. The width dimension is treated similarly. Normalized coordinates
-                                             -- outside the `[0, 1]` range are allowed, in which case we use
-                                             -- `extrapolation_value` to extrapolate the input image values.
-                          -> Tensor v3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-                                                      -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
-                          -> Tensor v4 Data.Int.Int32 -- ^ __image_size__: A 1-D tensor with value `[batch, image_height, image_width, depth]`
-                                                      -- containing the original image size. Both `image_height` and `image_width` need
-                                                      -- to be positive.
-                          -> Tensor Value t -- ^ __output__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
-cropAndResizeGradImage grads boxes box_ind image_size | eqLengthGuard [] =
-    buildOp (opDef "CropAndResizeGradImage"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        grads boxes box_ind image_size
-{-
-attr {
-  allowed_values {
-    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "bilinear" } }
-  default_value { s: "bilinear" }
-  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
-  name: "method"
-  type: "string"
-}
-input_arg {
-  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
-  name: "grads"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
-  name: "boxes"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
-  name: "box_ind"
-  type: DT_INT32
-}
-input_arg {
-  description: "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. Both `image_height` and `image_width` need\nto be positive."
-  name: "image_size"
-  type: DT_INT32
-}
-output_arg {
-  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes hyperbolic tangent of `x` element-wise.
-
-tanh :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                             (Data.Complex.Complex Float),
-                                             Data.Word.Word16, Double,
-                                             Float] t) => Tensor v1 t -- ^ __x__
-        -> Tensor Value t -- ^ __y__
-tanh x | eqLengthGuard [] =
-    buildOp (opDef "Tanh"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Converts each entry in the given tensor to strings.  Supports many numeric
---
--- types and boolean.
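---
--- E.g., rendering an @Int32@ vector as decimal strings (a sketch; @TF.vector@
--- and the session helpers are assumed, not provided by this module):
---
--- > -- expected fetch: ["1", "2", "3"]
--- > strs <- TF.runSession $
--- >     TF.run (asString (TF.vector [1, 2, 3 :: Data.Int.Int32]))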
-asString :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Float),
-                                                 Bool, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Double, Float] t) =>
-            Tensor v1 t -- ^ __input__
-            -> Tensor Value Data.ByteString.ByteString -- ^ __output__
-asString input | eqLengthGuard [] =
-    buildOp (opDef "AsString"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_BOOL
-      type: DT_INT8
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { i: -1 }
-  description: "The post-decimal precision to use for floating point numbers.\nOnly used if precision > -1."
-  name: "precision"
-  type: "int"
-}
-attr {
-  default_value { b: false }
-  description: "Use scientific notation for floating point numbers."
-  name: "scientific"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "Use shortest representation (either scientific or standard) for\nfloating point numbers."
-  name: "shortest"
-  type: "bool"
-}
-attr {
-  default_value { i: -1 }
-  description: "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width > -1."
-  name: "width"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "The value to pad if width > -1.  If empty, pads with spaces.\nAnother typical value is \'0\'.  String cannot be longer than 1 character."
-  name: "fill"
-  type: "string"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type: DT_STRING }
--}
-
--- | Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most
---
--- 2 dimensions of `input`.
-iFFT2D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor.
-          -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2
-          --   dimensions of `input` are replaced with their inverse 2D Fourier Transform.
-          -- 
-          -- @compatibility(numpy)
-          -- Equivalent to np.ifft2
-          -- @end_compatibility
-iFFT2D input | eqLengthGuard [] =
-    buildOp (opDef "IFFT2D")
-        input
-{-
-input_arg {
-  description: "A complex64 tensor." name: "input" type: DT_COMPLEX64
-}
-output_arg {
-  description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n  dimensions of `input` are replaced with their inverse 2D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.ifft2\n@end_compatibility"
-  name: "output"
-  type: DT_COMPLEX64
-}
--}
-
--- | Concatenates a list of `SparseTensor` along the specified dimension.
---
--- Concatenation is with respect to the dense versions of these sparse tensors.
--- It is assumed that each input is a `SparseTensor` whose elements are ordered
--- along increasing dimension number.
--- 
--- All inputs' shapes must match, except for the concat dimension.  The
--- `indices`, `values`, and `shapes` lists must have the same length.
--- 
--- The output shape is identical to the inputs', except along the concat
--- dimension, where it is the sum of the inputs' sizes along that dimension.
--- 
--- The output elements will be re-sorted to preserve the sort order along
--- increasing dimension number.
--- 
--- This op runs in `O(M log M)` time, where `M` is the total number of non-empty
--- values across all inputs. This is due to the need for an internal sort in
--- order to concatenate efficiently across an arbitrary dimension.
--- 
--- For example, if `concat_dim = 1` and the inputs are
--- 
---     sp_inputs[0]: shape = [2, 3]
---     [0, 2]: "a"
---     [1, 0]: "b"
---     [1, 1]: "c"
--- 
---     sp_inputs[1]: shape = [2, 4]
---     [0, 1]: "d"
---     [0, 2]: "e"
--- 
--- then the output will be
--- 
---     shape = [2, 7]
---     [0, 2]: "a"
---     [0, 4]: "d"
---     [0, 5]: "e"
---     [1, 0]: "b"
---     [1, 1]: "c"
--- 
--- Graphically this is equivalent to doing
--- 
---     [    a] concat [  d e  ] = [    a   d e  ]
---     [b c  ]        [       ]   [b c          ]
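---
--- In this binding the operation is pure and the components are passed as
--- three parallel lists; a sketch of the example above (the index, value, and
--- shape tensors are assumed to have been built elsewhere):
---
--- > -- ixA/valsA/shapeA and ixB/valsB/shapeB are assumed inputs
--- > let (outIx, outVals, outShape) =
--- >         sparseConcat 1 [ixA, ixB] [valsA, valsB] [shapeA, shapeB]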
-sparseConcat :: forall v1 v2 v3 t . (TensorType t) =>
-                Data.Int.Int64 -- ^ __concat_dim__: Dimension to concatenate along. Must be in range [-rank, rank),
-                               -- where rank is the number of dimensions in each input `SparseTensor`.
-                -> [Tensor v1 Data.Int.Int64] -- ^ __indices__: 2-D.  Indices of each input `SparseTensor`.
-                -> [Tensor v2 t] -- ^ __values__: 1-D.  Non-empty values of each `SparseTensor`.
-                -> [Tensor v3 Data.Int.Int64] -- ^ __shapes__: 1-D.  Shapes of each `SparseTensor`.
-                -> (Tensor Value Data.Int.Int64, Tensor Value t,
-                    Tensor Value Data.Int.Int64)
-                -- ^ (__output_indices__, __output_values__, __output_shape__)
-                --
-                -- * __output_indices__: 2-D.  Indices of the concatenated `SparseTensor`.
-                --
-                -- * __output_values__: 1-D.  Non-empty values of the concatenated `SparseTensor`.
-                --
-                -- * __output_shape__: 1-D.  Shape of the concatenated `SparseTensor`.
-sparseConcat concat_dim indices values
-             shapes | eqLengthGuard [("N", [("indices", length indices),
-                                            ("values", length values),
-                                            ("shapes", length shapes)])] =
-    buildOp (opDef "SparseConcat"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "concat_dim" .~ concat_dim
-             & opAttr "N" .~ n)
-        indices values shapes
-  where
-    n = fromIntegral (length indices) :: Int64
-{-
-attr {
-  description: "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`."
-  name: "concat_dim"
-  type: "int"
-}
-attr { has_minimum: true minimum: 2 name: "N" type: "int" }
-attr { name: "T" type: "type" }
-input_arg {
-  description: "2-D.  Indices of each input `SparseTensor`."
-  name: "indices"
-  number_attr: "N"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  Non-empty values of each `SparseTensor`."
-  name: "values"
-  number_attr: "N"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shapes of each `SparseTensor`."
-  name: "shapes"
-  number_attr: "N"
-  type: DT_INT64
-}
-output_arg {
-  description: "2-D.  Indices of the concatenated `SparseTensor`."
-  name: "output_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  Non-empty values of the concatenated `SparseTensor`."
-  name: "output_values"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D.  Shape of the concatenated `SparseTensor`."
-  name: "output_shape"
-  type: DT_INT64
-}
--}
-
--- | Generate a glob pattern matching all sharded file names.
-
-shardedFilespec :: Tensor v1 Data.ByteString.ByteString -- ^ __basename__
-                   -> Tensor v2 Data.Int.Int32 -- ^ __num_shards__
-                   -> Tensor Value Data.ByteString.ByteString -- ^ __filename__
-shardedFilespec basename num_shards | eqLengthGuard [] =
-    buildOp (opDef "ShardedFilespec")
-        basename num_shards
-{-
-input_arg { name: "basename" type: DT_STRING }
-input_arg { name: "num_shards" type: DT_INT32 }
-output_arg { name: "filename" type: DT_STRING }
--}
-
--- | Shuffle dimensions of x according to a permutation.
---
--- The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
---   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
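---
--- For example, swapping the axes of a rank-2 tensor @x@ built elsewhere (a
--- sketch; @TF.vector@ is assumed from @TensorFlow.Ops@):
---
--- > let perm = TF.vector [1, 0 :: Data.Int.Int32]
--- >     y    = transpose x perm  -- y.shape == [x.shape[1], x.shape[0]]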
-transpose :: forall v1 v2 t tperm . (TensorType t, TensorType tperm,
-                                     OneOf '[Data.Int.Int32,
-                                             Data.Int.Int64] tperm) =>
-             Tensor v1 t -- ^ __x__
-             -> Tensor v2 tperm -- ^ __perm__
-             -> Tensor Value t -- ^ __y__
-transpose x perm | eqLengthGuard [] =
-    buildOp (opDef "Transpose"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tperm" .~ tensorType (undefined :: tperm))
-        x perm
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tperm"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "perm" type_attr: "Tperm" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | Joins a string Tensor across the given dimensions.
---
--- Computes the string join across dimensions in the given string Tensor of shape
--- `[d_0, d_1, ..., d_n-1]`.  Returns a new Tensor created by joining the input
--- strings with the given separator (default: empty string).  Negative indices are
--- counted backwards from the end, with `-1` being equivalent to `n - 1`.  Passing
--- an empty `reduction_indices` joins all strings in linear index order and outputs
--- a scalar string.
--- 
--- For example:
--- 
--- ```
--- # tensor `a` is [["a", "b"], ["c", "d"]]
--- tf.reduce_join(a, 0) ==> ["ac", "bd"]
--- tf.reduce_join(a, 1) ==> ["ab", "cd"]
--- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
--- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
--- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
--- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
--- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
--- tf.reduce_join(a, [0, 1]) ==> ["acbd"]
--- tf.reduce_join(a, [1, 0]) ==> ["abcd"]
--- tf.reduce_join(a, []) ==> ["abcd"]
--- ```
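---
--- The Haskell binding takes the reduction indices as an @int32@ tensor; a
--- sketch of the first line above, with @a@ and @TF.vector@ assumed:
---
--- > -- [["a", "b"], ["c", "d"]] joined along dimension 0 ==> ["ac", "bd"]
--- > let joined = reduceJoin a (TF.vector [0 :: Data.Int.Int32])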
-reduceJoin :: Tensor v1 Data.ByteString.ByteString -- ^ __inputs__: The input to be joined.  All reduced indices must have non-zero size.
-              -> Tensor v2 Data.Int.Int32 -- ^ __reduction_indices__: The dimensions to reduce over.  Dimensions are reduced in the
-                                          -- order specified.  Omitting `reduction_indices` is equivalent to passing
-                                          -- `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
-              -> Tensor Value Data.ByteString.ByteString -- ^ __output__: Has shape equal to that of the input with reduced dimensions removed or
-              -- set to `1` depending on `keep_dims`.
-reduceJoin inputs reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "ReduceJoin")
-        inputs reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If `True`, retain reduced dimensions with length `1`."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  default_value { s: "" }
-  description: "The separator to use when joining."
-  name: "separator"
-  type: "string"
-}
-input_arg {
-  description: "The input to be joined.  All reduced indices must have non-zero size."
-  name: "inputs"
-  type: DT_STRING
-}
-input_arg {
-  description: "The dimensions to reduce over.  Dimensions are reduced in the\norder specified.  Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported."
-  name: "reduction_indices"
-  type: DT_INT32
-}
-output_arg {
-  description: "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`."
-  name: "output"
-  type: DT_STRING
-}
--}
-
--- | Converts each string in the input Tensor to its hash modulo the number of buckets.
---
--- The hash function is deterministic on the content of the string within the
--- process.
--- 
--- Note that the hash function may change from time to time.
--- This functionality will be deprecated and it's recommended to use
--- `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
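---
--- A sketch bucketing two strings into 1024 buckets (@TF.vector@ and the
--- session helpers are assumed from the companion packages):
---
--- > -- fetches one bucket id in [0, 1024) per input string
--- > buckets <- TF.runSession $
--- >     TF.run (stringToHashBucket 1024 (TF.vector ["cat", "dog"]))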
-stringToHashBucket :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
-                      -> Tensor v1 Data.ByteString.ByteString -- ^ __string_tensor__
-                      -> Tensor Value Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
-stringToHashBucket num_buckets string_tensor | eqLengthGuard [] =
-    buildOp (opDef "StringToHashBucket"
-             & opAttr "num_buckets" .~ num_buckets)
-        string_tensor
-{-
-attr {
-  description: "The number of buckets."
-  has_minimum: true
-  minimum: 1
-  name: "num_buckets"
-  type: "int"
-}
-input_arg { name: "string_tensor" type: DT_STRING }
-output_arg {
-  description: "A Tensor of the same shape as the input `string_tensor`."
-  name: "output"
-  type: DT_INT64
-}
--}
-
--- | Draws samples from a multinomial distribution.
-
-multinomial :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                       Data.Int.Int32,
-                                                       Data.Int.Int64,
-                                                       Data.Int.Int8,
-                                                       Data.Word.Word16,
-                                                       Data.Word.Word8, Double,
-                                                       Float] t) =>
-               Tensor v1 t -- ^ __logits__: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
-                           -- represents the unnormalized log probabilities for all classes.
-               -> Tensor v2 Data.Int.Int32 -- ^ __num_samples__: 0-D.  Number of independent samples to draw for each row slice.
-               -> Build (Tensor Value Data.Int.Int64) -- ^ __output__: 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
-               -- contains the drawn class labels with range `[0, num_classes)`.
-multinomial logits num_samples | eqLengthGuard [] =
-    buildOp (opDef "Multinomial"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        logits num_samples
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes."
-  name: "logits"
-  type_attr: "T"
-}
-input_arg {
-  description: "0-D.  Number of independent samples to draw for each row slice."
-  name: "num_samples"
-  type: DT_INT32
-}
-output_arg {
-  description: "2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`."
-  name: "output"
-  type: DT_INT64
-}
--}
-
--- | Converts each string in the input Tensor to its hash modulo the number of buckets.
---
--- The hash function is deterministic on the content of the string within the
--- process. The hash function is a keyed hash function, where attribute `key`
--- defines the key of the hash function. `key` is an array of 2 elements.
--- 
--- A strong hash is important when inputs may be malicious, e.g. URLs with
--- additional components. Adversaries could try to make their inputs hash to the
--- same bucket for a denial-of-service attack or to skew the results. A strong
--- hash prevents this by making it difficult, if not infeasible, to compute inputs
--- that hash to the same bucket. This comes at a cost of roughly 4x higher compute
--- time than `tf.string_to_hash_bucket_fast`.
-stringToHashBucketStrong :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
-                            -> Tensor v1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
-                            -> Tensor Value Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
-stringToHashBucketStrong num_buckets input | eqLengthGuard [] =
-    buildOp (opDef "StringToHashBucketStrong"
-             & opAttr "num_buckets" .~ num_buckets)
-        input
-{-
-attr {
-  description: "The number of buckets."
-  has_minimum: true
-  minimum: 1
-  name: "num_buckets"
-  type: "int"
-}
-attr {
-  description: "The key for the keyed hash function passed as a list of two uint64\nelements."
-  name: "key"
-  type: "list(int)"
-}
-input_arg {
-  description: "The strings to assign a hash bucket."
-  name: "input"
-  type: DT_STRING
-}
-output_arg {
-  description: "A Tensor of the same shape as the input `string_tensor`."
-  name: "output"
-  type: DT_INT64
-}
--}
-
--- | Applies sparse `updates` to individual values or slices within a given
---
--- variable according to `indices`.
--- 
--- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
--- 
--- `indices` must be an integer tensor containing indices into `ref`.
--- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
--- 
--- The innermost dimension of `indices` (with length `K`) corresponds to
--- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
--- dimension of `ref`.
--- 
--- `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
--- 
--- ```
--- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
--- ```
--- 
--- For example, say we want to update 4 scattered elements of a rank-1 tensor
--- with 8 elements. In Python, that update would look like this:
--- 
---     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
---     indices = tf.constant([[4], [3], [1] ,[7]])
---     updates = tf.constant([9, 10, 11, 12])
---     update = tf.scatter_nd_update(ref, indices, updates)
---     with tf.Session() as sess:
---       print sess.run(update)
--- 
--- The resulting update to ref would look like this:
--- 
---     [1, 11, 3, 10, 9, 6, 7, 12]
--- 
--- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
--- slices.
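---
--- A rough Haskell rendering of the Python snippet above (a sketch: 'variable'
--- and an 'assign' initialiser from these bindings are assumed, and the
--- actions are sequenced inside 'Build'):
---
--- > ref     <- variable (Shape [8])
--- > ref'    <- assign ref (TF.vector [1, 2, 3, 4, 5, 6, 7, 8 :: Data.Int.Int32])
--- > updated <- scatterNdUpdate ref'
--- >                (TF.constant (Shape [4, 1]) [4, 3, 1, 7 :: Data.Int.Int32])
--- >                (TF.vector [9, 10, 11, 12 :: Data.Int.Int32])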
-scatterNdUpdate :: forall v2 v3 t tindices . (TensorType t, TensorType tindices,
-                                              OneOf '[Data.Int.Int32,
-                                                      Data.Int.Int64] tindices) =>
-                   Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
-                   -> Tensor v2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
-                                         -- A tensor of indices into ref.
-                   -> Tensor v3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated
-                                  -- values to add to ref.
-                   -> Build (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want to
-                   -- use the updated values after the update is done.
-scatterNdUpdate ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterNdUpdate"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: true }
-  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "A mutable Tensor. Should be from a Variable node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to add to ref."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same as ref. Returned as a convenience for operations that want to\nuse the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
--- | Compute gradients for a FakeQuantWithMinMaxVars operation.
-
-fakeQuantWithMinMaxVarsGradient :: Tensor v1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
-                                   -> Tensor v2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
-                                                      -- min, max: Quantization interval, scalar floats.
-                                   -> Tensor v3 Float -- ^ __min__
-                                   -> Tensor v4 Float -- ^ __max__
-                                   -> (Tensor Value Float, Tensor Value Float,
-                                       Tensor Value Float)
-                                   -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__)
-                                   --
-                                   -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs:
-                                   -- `gradients * (inputs >= min && inputs <= max)`.
-                                   --
-                                   -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter:
-                                   -- `sum(gradients * (inputs < min))`.
-                                   --
-                                   -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter:
-                                   -- `sum(gradients * (inputs > max))`.
-fakeQuantWithMinMaxVarsGradient gradients inputs min max | eqLengthGuard [] =
-    buildOp (opDef "FakeQuantWithMinMaxVarsGradient")
-        gradients inputs min max
-{-
-input_arg {
-  description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation."
-  name: "gradients"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation.\nmin, max: Quantization interval, scalar floats."
-  name: "inputs"
-  type: DT_FLOAT
-}
-input_arg { name: "min" type: DT_FLOAT }
-input_arg { name: "max" type: DT_FLOAT }
-output_arg {
-  description: "Backpropagated gradients w.r.t. inputs:\n`gradients * (inputs >= min && inputs <= max)`."
-  name: "backprops_wrt_input"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Backpropagated gradients w.r.t. min parameter:\n`sum(gradients * (inputs < min))`."
-  name: "backprop_wrt_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Backpropagated gradients w.r.t. max parameter:\n`sum(gradients * (inputs > max))`."
-  name: "backprop_wrt_max"
-  type: DT_FLOAT
-}
--}
-
--- | Returns the size of a tensor.
---
--- This operation returns an integer representing the number of elements in
--- `input`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
--- size(t) ==> 12
--- ```
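---
--- The same example in this binding, where the result type annotation picks
--- @out_type@ (a sketch; @TF.constant@ and the session helpers are assumed):
---
--- > n <- TF.runSession $ TF.run
--- >     (size (TF.constant (Shape [2, 2, 3]) [1..12 :: Float])
--- >          :: Tensor Value Data.Int.Int32)  -- fetches 12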
-size :: forall v1 t out_type . (TensorType t, TensorType out_type,
-                                OneOf '[Data.Int.Int32,
-                                        Data.Int.Int64] out_type) =>
-        Tensor v1 t -- ^ __input__
-        -> Tensor Value out_type -- ^ __output__
-size input | eqLengthGuard [] =
-    buildOp (opDef "Size"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "out_type" }
--}
-
--- | Divides a variable reference by sparse updates.
---
--- This operation computes
--- 
---     # Scalar indices
---     ref[indices, ...] /= updates[...]
--- 
---     # Vector indices (for each i)
---     ref[indices[i], ...] /= updates[i, ...]
--- 
---     # High rank indices (for each i, ..., j)
---     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
--- 
--- This operation outputs `ref` after the update is done.
--- This makes it easier to chain operations that need to use the updated value.
--- 
--- Duplicate entries are handled correctly: if multiple `indices` reference
--- the same location, their contributions divide.
--- 
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
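---
--- For instance, dividing rows 0 and 2 of a variable in place (a sketch;
--- @ref@ is a 'Tensor Ref Float' created and initialised elsewhere, and the
--- action runs inside 'Build'):
---
--- > divided <- scatterDiv ref
--- >                (TF.vector [0, 2 :: Data.Int.Int32])
--- >                (TF.vector [2, 4 :: Float])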
-scatterDiv :: forall v2 v3 t tindices . (TensorType t,
-                                         OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t, TensorType tindices,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tindices) =>
-              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
-              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
-              -> Tensor v3 t -- ^ __updates__: A tensor of values that `ref` is divided by.
-              -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
-              -- to use the updated values after the update is done.
-scatterDiv ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterDiv"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A tensor of indices into the first dimension of `ref`."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A tensor of values that `ref` is divided by."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
--- | Multiplies sparse updates into a variable reference.
---
--- This operation computes
--- 
---     # Scalar indices
---     ref[indices, ...] *= updates[...]
--- 
---     # Vector indices (for each i)
---     ref[indices[i], ...] *= updates[i, ...]
--- 
---     # High rank indices (for each i, ..., j)
---     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
--- 
--- This operation outputs `ref` after the update is done.
--- This makes it easier to chain operations that need to use the updated value.
--- 
--- Duplicate entries are handled correctly: if multiple `indices` reference
--- the same location, their contributions multiply.
--- 
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
-scatterMul :: forall v2 v3 t tindices . (TensorType t,
-                                         OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t, TensorType tindices,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tindices) =>
-              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
-              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
-              -> Tensor v3 t -- ^ __updates__: A tensor of updated values to multiply to `ref`.
-              -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
-              -- to use the updated values after the update is done.
-scatterMul ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterMul"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A tensor of indices into the first dimension of `ref`."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A tensor of updated values to multiply to `ref`."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
--- | Copy Host Op.
---
--- Performs CPU-to-CPU deep-copying of a tensor.
--- 
--- Unlike the Copy Op, this op has a HostMemory constraint on its input or output.
-copyHost :: forall v1 t . (TensorType t) =>
-            Tensor v1 t -- ^ __input__: Input tensor.
-            -> Tensor Value t -- ^ __output__: Output tensor, deep-copied from input.
-copyHost input | eqLengthGuard [] =
-    buildOp (opDef "CopyHost"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { s: "" }
-  description: "The name of the input tensor."
-  name: "tensor_name"
-  type: "string"
-}
-input_arg {
-  description: "Input tensor." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "Output tensor, deep-copied from input."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | A Reader that outputs the entire contents of a file as a value.
---
--- To use, enqueue filenames in a Queue.  The output of ReaderRead will
--- be a filename (key) and the contents of that file (value).
-wholeFileReader :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
-wholeFileReader  | eqLengthGuard [] =
-    buildOp (opDef "WholeFileReader")
-        
-{-
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to reference the Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
--}
-
--- | Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
---
--- The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
--- `N` is the minibatch size and the rows correspond to the output handles of
--- `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
--- original `SparseTensor` objects that went into the given input ops must all
--- match.  When the final `SparseTensor` is created, it has rank one
--- higher than the ranks of the incoming `SparseTensor` objects
--- (they have been concatenated along a new row dimension on the left).
--- 
--- The output `SparseTensor` object's shape values for all dimensions but the
--- first are the max across the input `SparseTensor` objects' shape values
--- for the corresponding dimensions.  Its first shape value is `N`, the minibatch
--- size.
--- 
--- The input `SparseTensor` objects' indices are assumed ordered in
--- standard lexicographic order.  If this is not the case, after this
--- step run `SparseReorder` to restore index ordering.
--- 
--- For example, if the handles represent an input, which is a `[2, 3]` matrix
--- representing two original `SparseTensor` objects:
--- 
--- ```
---     index = [ 0]
---             [10]
---             [20]
---     values = [1, 2, 3]
---     shape = [50]
--- ```
--- 
--- and
--- 
--- ```
---     index = [ 2]
---             [10]
---     values = [4, 5]
---     shape = [30]
--- ```
--- 
--- then the final `SparseTensor` will be:
--- 
--- ```
---     index = [0  0]
---             [0 10]
---             [0 20]
---             [1  2]
---             [1 10]
---     values = [1, 2, 3, 4, 5]
---     shape = [2 50]
--- ```
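---
--- In this binding the read is a single 'Build' action and the annotation
--- fixes @dtype@ (a sketch; @handles@ is an @int64@ tensor of handles produced
--- by the corresponding add-to-map ops):
---
--- > (ix, vals, shp) <- takeManySparseFromTensorsMap handles
--- >                        :: Build ( Tensor Value Data.Int.Int64
--- >                                 , Tensor Value Float
--- >                                 , Tensor Value Data.Int.Int64 )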
-takeManySparseFromTensorsMap :: forall v1 dtype . (TensorType dtype) =>
-                                Tensor v1 Data.Int.Int64 -- ^ __sparse_handles__: 1-D, The `N` serialized `SparseTensor` objects.
-                                                         -- Shape: `[N]`.
-                                -> Build ((Tensor Value Data.Int.Int64,
-                                           Tensor Value dtype,
-                                           Tensor Value Data.Int.Int64))
-                                -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)
-                                --
-                                -- * __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
-                                --
-                                -- * __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
-                                --
-                                -- * __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
-takeManySparseFromTensorsMap sparse_handles | eqLengthGuard [] =
-    buildOp (opDef "TakeManySparseFromTensorsMap"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        sparse_handles
-{-
-attr {
-  description: "The `dtype` of the `SparseTensor` objects stored in the\n`SparseTensorsMap`."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  default_value { s: "" }
-  description: "The container name for the `SparseTensorsMap` read by this op."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "The shared name for the `SparseTensorsMap` read by this op.\nIt should not be blank; rather the `shared_name` or unique Operation name\nof the Op that created the original `SparseTensorsMap` should be used."
-  name: "shared_name"
-  type: "string"
-}
-input_arg {
-  description: "1-D, The `N` serialized `SparseTensor` objects.\nShape: `[N]`."
-  name: "sparse_handles"
-  type: DT_INT64
-}
-output_arg {
-  description: "2-D.  The `indices` of the minibatch `SparseTensor`."
-  name: "sparse_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  The `values` of the minibatch `SparseTensor`."
-  name: "sparse_values"
-  type_attr: "dtype"
-}
-output_arg {
-  description: "1-D.  The `shape` of the minibatch `SparseTensor`."
-  name: "sparse_shape"
-  type: DT_INT64
-}
--}
-
--- | Destroys the temporary variable and returns its final value.
---
--- Sets output to the value of the Tensor pointed to by 'ref', then destroys
--- the temporary variable called 'var_name'.
--- All other uses of 'ref' *must* have executed before this op.
--- This is typically achieved by chaining the ref through each assign op, or by
--- using control dependencies.
--- 
--- Outputs the final value of the tensor pointed to by 'ref'.
-destroyTemporaryVariable :: forall t . (TensorType t) =>
-                            Tensor Ref t -- ^ __ref__: A reference to the temporary variable tensor.
-                            -> Build (Tensor Value t) -- ^ __value__
-destroyTemporaryVariable ref | eqLengthGuard [] =
-    buildOp (opDef "DestroyTemporaryVariable"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        ref
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "Name of the temporary variable, usually the name of the matching\n\'TemporaryVariable\' op."
-  name: "var_name"
-  type: "string"
-}
-input_arg {
-  description: "A reference to the temporary variable tensor."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-output_arg { name: "value" type_attr: "T" }
--}
-
--- | Update 'ref' by subtracting 'value' from it.
---
--- This operation outputs "ref" after the update is done.
--- This makes it easier to chain operations that need to use the updated value.
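---
--- E.g., decrementing a counter (a sketch; @ref@ is a 'Tensor Ref Float'
--- created with 'variable' and initialised elsewhere, and @TF.scalar@ is
--- assumed from @TensorFlow.Ops@):
---
--- > ref' <- assignSub ref (TF.scalar (1 :: Float))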
-assignSub :: forall v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t) =>
-             Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
-             -> Tensor v2 t -- ^ __value__: The value to be subtracted from the variable.
-             -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
-             -- to use the new value after the variable has been updated.
-assignSub ref value | eqLengthGuard [] =
-    buildOp (opDef "AssignSub"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        ref value
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "The value to be subtracted to the variable."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
--- | JPEG-encode an image.
---
--- `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
--- 
--- The attr `format` can be used to override the color format of the encoded
--- output.  Values can be:
--- 
--- *   `''`: Use a default format based on the number of channels in the image.
--- *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
---     of `image` must be 1.
--- *   `rgb`: Output an RGB JPEG image. The `channels` dimension
---     of `image` must be 3.
--- 
--- If `format` is not specified or is the empty string, a default format is picked
--- in function of the number of channels in `image`:
--- 
--- *   1: Output a grayscale image.
--- *   3: Output an RGB image.
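---
--- A sketch encoding a tiny all-black RGB image (all attrs keep their
--- defaults; @TF.constant@ and the session helpers are assumed):
---
--- > jpegBytes <- TF.runSession $ TF.run
--- >     (encodeJpeg (TF.constant (Shape [2, 2, 3])
--- >                              (replicate 12 (0 :: Data.Word.Word8))))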
-encodeJpeg :: Tensor v1 Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`.
-              -> Tensor Value Data.ByteString.ByteString -- ^ __contents__: 0-D. JPEG-encoded image.
-encodeJpeg image | eqLengthGuard [] =
-    buildOp (opDef "EncodeJpeg")
-        image
-{-
-attr {
-  allowed_values { list { s: "" s: "grayscale" s: "rgb" } }
-  default_value { s: "" }
-  description: "Per pixel image format."
-  name: "format"
-  type: "string"
-}
-attr {
-  default_value { i: 95 }
-  description: "Quality of the compression from 0 to 100 (higher is better and slower)."
-  name: "quality"
-  type: "int"
-}
-attr {
-  default_value { b: false }
-  description: "If True, create a JPEG that loads progressively (coarse to fine)."
-  name: "progressive"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If True, spend CPU/RAM to reduce size with no quality change."
-  name: "optimize_size"
-  type: "bool"
-}
-attr {
-  default_value { b: true }
-  description: "See http://en.wikipedia.org/wiki/Chroma_subsampling."
-  name: "chroma_downsampling"
-  type: "bool"
-}
-attr {
-  allowed_values { list { s: "in" s: "cm" } }
-  default_value { s: "in" }
-  description: "Unit used to specify `x_density` and `y_density`:\npixels per inch (`\'in\'`) or centimeter (`\'cm\'`)."
-  name: "density_unit"
-  type: "string"
-}
-attr {
-  default_value { i: 300 }
-  description: "Horizontal pixels per density unit."
-  name: "x_density"
-  type: "int"
-}
-attr {
-  default_value { i: 300 }
-  description: "Vertical pixels per density unit."
-  name: "y_density"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "If not empty, embed this XMP metadata in the image header."
-  name: "xmp_metadata"
-  type: "string"
-}
-input_arg {
-  description: "3-D with shape `[height, width, channels]`."
-  name: "image"
-  type: DT_UINT8
-}
-output_arg {
-  description: "0-D. JPEG-encoded image."
-  name: "contents"
-  type: DT_STRING
-}
--}
-
--- | Returns a tensor that may be mutated, but only persists within a single step.
---
--- This is an experimental op for internal use only and it is possible to use this
--- op in unsafe ways.  DO NOT USE unless you fully understand the risks.
--- 
--- It is the caller's responsibility to ensure that 'ref' is eventually passed to a
--- matching 'DestroyTemporaryVariable' op after all other uses have completed.
--- 
--- Outputs a ref to the tensor state so it may be read or modified.
--- 
---   E.g.
---       var = state_ops._temporary_variable([1, 2], types.float_)
---       var_name = var.op.name
---       var = state_ops.assign(var, [[4.0, 5.0]])
---       var = state_ops.assign_add(var, [[6.0, 7.0]])
---       final = state_ops._destroy_temporary_variable(var, var_name=var_name)
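---
--- A rough Haskell rendering of the snippet above (a sketch: 'assign' and
--- 'assignAdd' are generated elsewhere in this module, everything is
--- sequenced inside 'Build', and the unexposed @var_name@ attr keeps its
--- default):
---
--- > var   <- temporaryVariable (Shape [1, 2])
--- > var'  <- assign var (TF.constant (Shape [1, 2]) [4.0, 5.0 :: Float])
--- > var'' <- assignAdd var' (TF.constant (Shape [1, 2]) [6.0, 7.0 :: Float])
--- > final <- destroyTemporaryVariable var''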
-temporaryVariable :: forall dtype . (TensorType dtype) =>
-                     Shape -- ^ __shape__: The shape of the variable tensor.
-                     -> Build (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor.
-temporaryVariable shape | eqLengthGuard [] =
-    buildOp (opDef "TemporaryVariable"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "shape" .~ shape)
-        
-{-
-attr {
-  description: "The shape of the variable tensor."
-  name: "shape"
-  type: "shape"
-}
-attr {
-  description: "The type of elements in the variable tensor."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  default_value { s: "" }
-  description: "Overrides the name used for the temporary variable resource. Default\nvalue is the name of the \'TemporaryVariable\' op (which is guaranteed unique)."
-  name: "var_name"
-  type: "string"
-}
-output_arg {
-  description: "A reference to the variable tensor."
-  is_ref: true
-  name: "ref"
-  type_attr: "dtype"
-}
--}
-
--- | Checks whether a tensor has been initialized.
---
--- Outputs boolean scalar indicating whether the tensor has been initialized.
-isVariableInitialized :: forall dtype . (TensorType dtype) =>
-                         Tensor Ref dtype -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
-                         -> Build (Tensor Value Bool) -- ^ __is_initialized__
-isVariableInitialized ref | eqLengthGuard [] =
-    buildOp (opDef "IsVariableInitialized"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        ref
-{-
-attr {
-  description: "The type of elements in the variable tensor."
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "Should be from a `Variable` node. May be uninitialized."
-  is_ref: true
-  name: "ref"
-  type_attr: "dtype"
-}
-output_arg { name: "is_initialized" type: DT_BOOL }
--}
-
--- | Holds state in the form of a tensor that persists across steps.
---
--- Outputs a ref to the tensor state so it may be read or modified.
--- TODO(zhifengc/mrry): Add a pointer to a more detailed document
--- about sharing states in tensorflow.
-variable :: forall dtype . (TensorType dtype) =>
-            Shape -- ^ __shape__: The shape of the variable tensor.
-            -> Build (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor.
-variable shape | eqLengthGuard [] =
-    buildOp (opDef "Variable"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "shape" .~ shape)
-        
-{-
-attr {
-  description: "The shape of the variable tensor."
-  name: "shape"
-  type: "shape"
-}
-attr {
-  description: "The type of elements in the variable tensor."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "A reference to the variable tensor."
-  is_ref: true
-  name: "ref"
-  type_attr: "dtype"
-}
--}
-
--- | Returns the element-wise min of two SparseTensors.
---
--- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
-sparseSparseMinimum :: forall v1 v2 v3 v4 v5 v6 t . (TensorType t,
-                                                     OneOf '[(Data.Complex.Complex Double),
-                                                             (Data.Complex.Complex Float),
-                                                             Data.Int.Int16,
-                                                             Data.Int.Int32,
-                                                             Data.Int.Int64,
-                                                             Data.Int.Int8,
-                                                             Data.Word.Word16,
-                                                             Data.Word.Word8,
-                                                             Double,
-                                                             Float] t) =>
-                       Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                                -- SparseTensor, in the canonical lexicographic ordering.
-                       -> Tensor v2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
-                       -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
-                       -> Tensor v4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
-                       -> Tensor v5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
-                       -> Tensor v6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
-                       -> (Tensor Value Data.Int.Int64, Tensor Value t)
-                       -- ^ (__output_indices__, __output_values__)
-                       --
-                       -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
-                       --
-                       -- * __output_values__: 1-D.  The values of the output SparseTensor.
-sparseSparseMinimum a_indices a_values a_shape b_indices b_values
-                    b_shape | eqLengthGuard [] =
-    buildOp (opDef "SparseSparseMinimum"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a_indices a_values a_shape b_indices b_values b_shape
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
-  name: "a_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
-  name: "a_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "a_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "counterpart to `a_indices` for the other operand."
-  name: "b_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
-  name: "b_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
-  name: "b_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "2-D.  The indices of the output SparseTensor."
-  name: "output_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  The values of the output SparseTensor."
-  name: "output_values"
-  type_attr: "T"
-}
--}
-
--- | Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
---
--- The regularized incomplete beta integral is defined as:
--- 
--- ```
--- I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}
--- ```
--- where
--- 
--- ```
--- B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt
--- ```
--- 
--- is the incomplete beta function and \\(B(a, b)\\) is the *complete*
--- beta function.
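---
--- For a quick check against the definitions above: when \\(b = 1\\),
--- \\(B(x; a, 1) = x^a / a\\) and \\(B(a, 1) = 1 / a\\), so
--- \\(I_x(a, 1) = x^a\\).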
-betainc :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double, Float] t) =>
-           Tensor v1 t -- ^ __a__
-           -> Tensor v2 t -- ^ __b__
-           -> Tensor v3 t -- ^ __x__
-           -> Tensor Value t -- ^ __z__
-betainc a b x | eqLengthGuard [] =
-    buildOp (opDef "Betainc"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a b x
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "a" type_attr: "T" }
-input_arg { name: "b" type_attr: "T" }
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Update 'ref' by assigning 'value' to it.
---
--- This operation outputs "ref" after the assignment is done.
--- This makes it easier to chain operations that need to use the reset value.
-assign :: forall v2 t . (TensorType t) =>
-          Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. May be uninitialized.
-          -> Tensor v2 t -- ^ __value__: The value to be assigned to the variable.
-          -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
-          -- to use the new value after the variable has been reset.
-assign ref value | eqLengthGuard [] =
-    buildOp (opDef "Assign"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        ref value
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { b: true }
-  description: "If true, the operation will validate that the shape\nof \'value\' matches the shape of the Tensor being assigned to.  If false,\n\'ref\' will take on the shape of \'value\'."
-  name: "validate_shape"
-  type: "bool"
-}
-attr {
-  default_value { b: true }
-  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node. May be uninitialized."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "The value to be assigned to the variable."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been reset."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
--- | Applies softmax to a batched N-D `SparseTensor`.
---
--- The inputs represent an N-D SparseTensor  with logical shape `[..., B, C]`
--- (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
--- 
--- This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
--- logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
--- zero elements do not participate*.  Specifically, the algorithm is equivalent
--- to the following:
--- 
---   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
---       with shape `[B, C]`, along the size-C dimension;
---   (2) Masks out the original implicitly-zero locations;
---   (3) Renormalizes the remaining elements.
--- 
--- Hence, the `SparseTensor` result has exactly the same non-zero indices and
--- shape.
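---
--- A minimal call sketch (hedged: the tensor contents are illustrative and
--- `constant` is assumed from a helper module such as TensorFlow.Ops); this
--- encodes a single `1 x 2 x 2` batch with two non-zero entries:
---
--- ```haskell
--- softmaxed :: Tensor Value Float
--- softmaxed = sparseSoftmax indices values shape
---   where
---     indices = constant (Shape [2, 3]) [0, 0, 0,  0, 1, 1 :: Data.Int.Int64]
---     values  = constant (Shape [2]) [1, 2 :: Float]
---     shape   = constant (Shape [3]) [1, 2, 2 :: Data.Int.Int64]
--- ```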
-sparseSoftmax :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double, Float] t) =>
-                 Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
-                                          -- SparseTensor, in canonical ordering.
-                 -> Tensor v2 t -- ^ __sp_values__: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
-                 -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
-                 -> Tensor Value t -- ^ __output__: 1-D.  The `NNZ` values for the result `SparseTensor`.
-sparseSoftmax sp_indices sp_values sp_shape | eqLengthGuard [] =
-    buildOp (opDef "SparseSoftmax"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sp_indices sp_values sp_shape
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering."
-  name: "sp_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `NNZ` non-empty values corresponding to `sp_indices`."
-  name: "sp_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "sp_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  The `NNZ` values for the result `SparseTensor`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Adds up a SparseTensor and a dense Tensor, using these special rules:
---
--- (1) Broadcasts the dense side to have the same shape as the sparse side, if
---     eligible;
--- (2) Then, only the dense values pointed to by the indices of the SparseTensor
---     participate in the cwise addition.
--- 
--- By these rules, the result is a logical SparseTensor with exactly the same
--- indices and shape, but possibly with different non-zero values.  The output of
--- this Op is the resultant non-zero values.
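---
--- For example (illustrative values): with a `2 x 2` sparse operand holding
--- values `[1, 2]` at indices `[[0, 0], [1, 1]]` and a dense operand
--- `[[10, 20], [30, 40]]`, the output values are `[1 + 10, 2 + 40] = [11, 42]`;
--- the dense entries `20` and `30` do not participate.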
-sparseDenseCwiseAdd :: forall v1 v2 v3 v4 t . (TensorType t,
-                                               OneOf '[(Data.Complex.Complex Double),
-                                                       (Data.Complex.Complex Float),
-                                                       Data.Int.Int16,
-                                                       Data.Int.Int32,
-                                                       Data.Int.Int64,
-                                                       Data.Int.Int8,
-                                                       Data.Word.Word16,
-                                                       Data.Word.Word8, Double,
-                                                       Float] t) =>
-                       Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                                -- SparseTensor, possibly not in canonical ordering.
-                       -> Tensor v2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
-                       -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
-                       -> Tensor v4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
-                       -> Tensor Value t -- ^ __output__: 1-D.  The `N` values that are operated on.
-sparseDenseCwiseAdd sp_indices sp_values sp_shape dense | eqLengthGuard [] =
-    buildOp (opDef "SparseDenseCwiseAdd"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sp_indices sp_values sp_shape dense
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
-  name: "sp_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
-  name: "sp_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "sp_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "`R`-D.  The dense Tensor operand."
-  name: "dense"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D.  The `N` values that are operated on."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns the truth value of NOT x element-wise.
-
-logicalNot :: Tensor v1 Bool -- ^ __x__
-              -> Tensor Value Bool -- ^ __y__
-logicalNot x | eqLengthGuard [] =
-    buildOp (opDef "LogicalNot")
-        x
-{-
-input_arg { name: "x" type: DT_BOOL }
-output_arg { name: "y" type: DT_BOOL }
--}
-
--- | Computes the number of elements in the given queue.
-
-queueSize :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
-             -> Build (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue.
-queueSize handle | eqLengthGuard [] =
-    buildOp (opDef "QueueSize")
-        handle
-{-
-input_arg {
-  description: "The handle to a queue."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "The number of elements in the given queue."
-  name: "size"
-  type: DT_INT32
-}
--}
-
--- | Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
---
--- That is, for each row for which we have a gradient, var and accum are
--- updated as follows:
--- accum += grad * grad
--- var -= lr * grad * (1 / sqrt(accum))
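---
--- For example (illustrative numbers): for a row with `accum = 3`,
--- `grad = 1`, and `lr = 0.5`, `accum` becomes `3 + 1 * 1 = 4` and `var`
--- decreases by `0.5 * 1 * (1 / sqrt(4)) = 0.25`.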
-sparseApplyAdagrad :: forall v3 v4 v5 t tindices . (TensorType t,
-                                                    OneOf '[(Data.Complex.Complex Double),
-                                                            (Data.Complex.Complex Float),
-                                                            Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Data.Int.Int8,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8,
-                                                            Double, Float] t,
-                                                    TensorType tindices,
-                                                    OneOf '[Data.Int.Int32,
-                                                            Data.Int.Int64] tindices) =>
-                      Tensor Ref t -- ^ __var__: Should be from a Variable().
-                      -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                      -> Tensor v3 t -- ^ __lr__: Learning rate. Must be a scalar.
-                      -> Tensor v4 t -- ^ __grad__: The gradient.
-                      -> Tensor v5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-                      -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyAdagrad var accum lr grad indices | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyAdagrad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var accum lr grad indices
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Learning rate. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var and accum."
-  name: "indices"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Store the input tensor in the state of the current session.
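---
--- A minimal round-trip sketch (hedged: pairs this op with
--- `getSessionTensor`, defined later in this module; the fetched dtype is
--- pinned by the top-level signature):
---
--- ```haskell
--- roundTrip :: Tensor Value Float -> Tensor Value Float
--- roundTrip t = getSessionTensor (getSessionHandle t)
--- ```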
-
-getSessionHandle :: forall v1 t . (TensorType t) =>
-                    Tensor v1 t -- ^ __value__: The tensor to be stored.
-                    -> Tensor Value Data.ByteString.ByteString -- ^ __handle__: The handle for the tensor stored in the session state.
-getSessionHandle value | eqLengthGuard [] =
-    buildOp (opDef "GetSessionHandle"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        value
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The tensor to be stored."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "The handle for the tensor stored in the session state."
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Component-wise multiplies a SparseTensor by a dense Tensor.
---
--- The output locations corresponding to the implicitly zero elements in the sparse
--- tensor will be zero (i.e., will not take up storage space), regardless of the
--- contents of the dense tensor (even if it is +/-INF, despite the fact that
--- INF*0 == NaN).
--- 
--- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
--- the other direction.
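---
--- For example (illustrative values): with a `2 x 2` sparse operand holding
--- values `[3, 5]` at indices `[[0, 0], [1, 1]]` and a dense operand
--- `[[2, 7], [7, 4]]`, the output values are `[3 * 2, 5 * 4] = [6, 20]`; the
--- implicitly zero locations stay implicitly zero whatever the dense side
--- holds there.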
-sparseDenseCwiseMul :: forall v1 v2 v3 v4 t . (TensorType t,
-                                               OneOf '[(Data.Complex.Complex Double),
-                                                       (Data.Complex.Complex Float),
-                                                       Data.Int.Int16,
-                                                       Data.Int.Int32,
-                                                       Data.Int.Int64,
-                                                       Data.Int.Int8,
-                                                       Data.Word.Word16,
-                                                       Data.Word.Word8, Double,
-                                                       Float] t) =>
-                       Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                                -- SparseTensor, possibly not in canonical ordering.
-                       -> Tensor v2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
-                       -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
-                       -> Tensor v4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
-                       -> Tensor Value t -- ^ __output__: 1-D.  The `N` values that are operated on.
-sparseDenseCwiseMul sp_indices sp_values sp_shape dense | eqLengthGuard [] =
-    buildOp (opDef "SparseDenseCwiseMul"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sp_indices sp_values sp_shape dense
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
-  name: "sp_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
-  name: "sp_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "sp_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "`R`-D.  The dense Tensor operand."
-  name: "dense"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D.  The `N` values that are operated on."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
---
--- This Op does not require `a_indices` be sorted in standard lexicographic order.
-sparseTensorDenseAdd :: forall v1 v2 v3 v4 t tindices . (TensorType t,
-                                                         OneOf '[(Data.Complex.Complex Double),
-                                                                 (Data.Complex.Complex Float),
-                                                                 Data.Int.Int16,
-                                                                 Data.Int.Int32,
-                                                                 Data.Int.Int64,
-                                                                 Data.Int.Int8,
-                                                                 Data.Word.Word16,
-                                                                 Data.Word.Word8,
-                                                                 Double,
-                                                                 Float] t,
-                                                         TensorType tindices,
-                                                         OneOf '[Data.Int.Int32,
-                                                                 Data.Int.Int64] tindices) =>
-                        Tensor v1 tindices -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
-                        -> Tensor v2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
-                        -> Tensor v3 tindices -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
-                        -> Tensor v4 t -- ^ __b__: `ndims`-D Tensor.  With shape `a_shape`.
-                        -> Tensor Value t -- ^ __output__
-sparseTensorDenseAdd a_indices a_values a_shape b | eqLengthGuard [] =
-    buildOp (opDef "SparseTensorDenseAdd"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        a_indices a_values a_shape b
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`."
-  name: "a_indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`."
-  name: "a_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`."
-  name: "a_shape"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "`ndims`-D Tensor.  With shape `a_shape`."
-  name: "b"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Get the value of the tensor specified by its handle.
-
-getSessionTensor :: forall v1 dtype . (TensorType dtype) =>
-                    Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
-                    -> Tensor Value dtype -- ^ __value__: The tensor for the given handle.
-getSessionTensor handle | eqLengthGuard [] =
-    buildOp (opDef "GetSessionTensor"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle
-{-
-attr {
-  description: "The type of the output value."
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "The handle for a tensor stored in the session state."
-  name: "handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "The tensor for the given handle."
-  name: "value"
-  type_attr: "dtype"
-}
--}
-
--- | Reorders a SparseTensor into the canonical, row-major ordering.
---
--- Note that by convention, all sparse ops preserve the canonical ordering along
--- increasing dimension number. The only time ordering can be violated is during
--- manual manipulation of the indices and values vectors to add entries.
--- 
--- Reordering does not affect the shape of the SparseTensor.
--- 
--- If the tensor has rank `R` and `N` non-empty values, `input_indices` has
--- shape `[N, R]`, `input_values` has length `N`, and `input_shape` has length `R`.
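---
--- A minimal call sketch (hedged: `constant` is assumed from a helper module
--- such as TensorFlow.Ops; the indices below are deliberately out of order):
---
--- ```haskell
--- reordered :: (Tensor Value Data.Int.Int64, Tensor Value Float)
--- reordered = sparseReorder indices values shape
---   where
---     indices = constant (Shape [2, 2]) [1, 1,  0, 0 :: Data.Int.Int64]
---     values  = constant (Shape [2]) [20, 10 :: Float]
---     shape   = constant (Shape [2]) [2, 2 :: Data.Int.Int64]
--- ```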
-sparseReorder :: forall v1 v2 v3 t . (TensorType t) =>
-                 Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                          -- SparseTensor, possibly not in canonical ordering.
-                 -> Tensor v2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
-                 -> Tensor v3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
-                 -> (Tensor Value Data.Int.Int64, Tensor Value t)
-                 -- ^ (__output_indices__, __output_values__)
-                 --
-                 -- * __output_indices__: 2-D.  `N x R` matrix with the same indices as input_indices, but
-                 -- in canonical row-major ordering.
-                 --
-                 -- * __output_values__: 1-D.  `N` non-empty values corresponding to `output_indices`.
-sparseReorder input_indices input_values input_shape | eqLengthGuard [] =
-    buildOp (opDef "SparseReorder"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input_indices input_values input_shape
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
-  name: "input_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
-  name: "input_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "input_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "2-D.  `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering."
-  name: "output_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  `N` non-empty values corresponding to `output_indices`."
-  name: "output_values"
-  type_attr: "T"
-}
--}
-
--- | Split a `SparseTensor` into `num_split` tensors along one dimension.
---
--- If `shape[split_dim]` is not an integer multiple of `num_split`, slices
--- `[0 : shape[split_dim] % num_split]` each get one extra element along `split_dim`.
--- For example, if `split_dim = 1` and `num_split = 2` and the input is
--- 
---     input_tensor = shape = [2, 7]
---     [    a   d e  ]
---     [b c          ]
--- 
--- Graphically the output tensors are:
--- 
---     output_tensor[0] = shape = [2, 4]
---     [    a  ]
---     [b c    ]
--- 
---     output_tensor[1] = shape = [2, 3]
---     [ d e  ]
---     [      ]
-sparseSplit :: forall v1 v2 v3 v4 t . (TensorType t) =>
-               Data.Int.Int64 -- ^ __num_split__: The number of ways to split.
-               -> Tensor v1 Data.Int.Int64 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
-                                           -- `[0, rank(shape))`.
-               -> Tensor v2 Data.Int.Int64 -- ^ __indices__: 2-D tensor representing the indices of the sparse tensor.
-               -> Tensor v3 t -- ^ __values__: 1-D tensor representing the values of the sparse tensor.
-               -> Tensor v4 Data.Int.Int64 -- ^ __shape__: 1-D tensor representing the shape of the sparse tensor.
-               -> ([Tensor Value Data.Int.Int64], [Tensor Value t],
-                   [Tensor Value Data.Int.Int64])
-               -- ^ (__output_indices__, __output_values__, __output_shape__)
-               --
-               -- * __output_indices__: A list of 2-D tensors representing the indices of the output
-               -- sparse tensors.
-               --
-               -- * __output_values__: A list of 1-D tensors representing the values of the output sparse
-               -- tensors.
-               --
-               -- * __output_shape__: A list of 1-D tensors representing the shape of the output sparse
-               -- tensors.
-sparseSplit num_split split_dim indices values shape | eqLengthGuard [] =
-    buildListOp [num_split, num_split, num_split] (opDef "SparseSplit"
-                                                   & opAttr "T" .~ tensorType (undefined :: t)
-                                                   & opAttr "num_split" .~ num_split)
-        split_dim indices values shape
-{-
-attr {
-  description: "The number of ways to split."
-  has_minimum: true
-  minimum: 1
-  name: "num_split"
-  type: "int"
-}
-attr { name: "T" type: "type" }
-input_arg {
-  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(shape))`."
-  name: "split_dim"
-  type: DT_INT64
-}
-input_arg {
-  description: "2-D tensor represents the indices of the sparse tensor."
-  name: "indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D tensor represents the values of the sparse tensor."
-  name: "values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D. tensor represents the shape of the sparse tensor.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors."
-  name: "shape"
-  type: DT_INT64
-}
-output_arg {
-  name: "output_indices" number_attr: "num_split" type: DT_INT64
-}
-output_arg {
-  description: "A list of 1-D tensors represents the values of the output sparse\ntensors."
-  name: "output_values"
-  number_attr: "num_split"
-  type_attr: "T"
-}
-output_arg {
-  description: "A list of 1-D tensors represents the shape of the output sparse\ntensors."
-  name: "output_shape"
-  number_attr: "num_split"
-  type: DT_INT64
-}
--}
-
--- | Pads a tensor with zeros.
---
--- This operation pads `input` with zeros according to the `paddings` you
--- specify. `paddings` is an integer tensor with shape `[n, 2]`, where `n` is the
--- rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
--- how many zeros to add before the contents of `input` in that dimension, and
--- `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
--- in that dimension.
--- 
--- The padded size of each dimension D of the output is:
--- 
--- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
--- 
--- For example:
--- 
--- ```prettyprint
--- # 't' is [[1, 1], [2, 2]]
--- # 'paddings' is [[1, 1], [2, 2]]
--- # rank of 't' is 2
--- pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
---                       [0, 0, 1, 1, 0, 0]
---                       [0, 0, 2, 2, 0, 0]
---                       [0, 0, 0, 0, 0, 0]]
--- ```
-pad :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
-                                   OneOf '[Data.Int.Int32,
-                                           Data.Int.Int64] tpaddings) =>
-       Tensor v1 t -- ^ __input__
-       -> Tensor v2 tpaddings -- ^ __paddings__
-       -> Tensor Value t -- ^ __output__
-pad input paddings | eqLengthGuard [] =
-    buildOp (opDef "Pad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
-        input paddings
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tpaddings"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg { name: "paddings" type_attr: "Tpaddings" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Converts a sparse representation into a dense tensor.
---
--- Builds an array `dense` with shape `output_shape` such that
--- 
--- ```prettyprint
--- # If sparse_indices is scalar
--- dense[i] = (i == sparse_indices ? sparse_values : default_value)
--- 
--- # If sparse_indices is a vector, then for each i
--- dense[sparse_indices[i]] = sparse_values[i]
--- 
--- # If sparse_indices is an n by d matrix, then for each i in [0, n)
--- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
--- ```
--- 
--- All other values in `dense` are set to `default_value`.  If `sparse_values` is a
--- scalar, all sparse indices are set to this single value.
--- 
--- Indices should be sorted in lexicographic order, and indices must not
--- contain any repeats. If `validate_indices` is true, these properties
--- are checked during execution.
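---
--- A minimal call sketch (hedged: `constant` is assumed from a helper module
--- such as TensorFlow.Ops); this scatters two values into a `2 x 2` dense
--- tensor whose remaining entries are filled with `0`:
---
--- ```haskell
--- densified :: Tensor Value Float
--- densified = sparseToDense indices outShape values defaultValue
---   where
---     indices      = constant (Shape [2, 2]) [0, 0,  1, 1 :: Data.Int.Int32]
---     outShape     = constant (Shape [2]) [2, 2 :: Data.Int.Int32]
---     values       = constant (Shape [2]) [1, 2 :: Float]
---     defaultValue = constant (Shape []) [0 :: Float]
--- ```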
-sparseToDense :: forall v1 v2 v3 v4 t tindices . (TensorType t,
-                                                  TensorType tindices,
-                                                  OneOf '[Data.Int.Int32,
-                                                          Data.Int.Int64] tindices) =>
-                 Tensor v1 tindices -- ^ __sparse_indices__: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
-                                    -- index where `sparse_values[i]` will be placed.
-                 -> Tensor v2 tindices -- ^ __output_shape__: 1-D.  Shape of the dense output tensor.
-                 -> Tensor v3 t -- ^ __sparse_values__: 1-D.  Values corresponding to each row of `sparse_indices`,
-                                -- or a scalar value to be used for all sparse indices.
-                 -> Tensor v4 t -- ^ __default_value__: Scalar value to set for indices not specified in
-                                -- `sparse_indices`.
-                 -> Tensor Value t -- ^ __dense__: Dense output tensor of shape `output_shape`.
-sparseToDense sparse_indices output_shape sparse_values
-              default_value | eqLengthGuard [] =
-    buildOp (opDef "SparseToDense"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        sparse_indices output_shape sparse_values default_value
-{-
-attr {
-  default_value { b: true }
-  description: "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats."
-  name: "validate_indices"
-  type: "bool"
-}
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg {
-  description: "0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed."
-  name: "sparse_indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "1-D.  Shape of the dense output tensor."
-  name: "output_shape"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "1-D.  Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices."
-  name: "sparse_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scalar value to set for indices not specified in\n`sparse_indices`."
-  name: "default_value"
-  type_attr: "T"
-}
-output_arg {
-  description: "Dense output tensor of shape `output_shape`."
-  name: "dense"
-  type_attr: "T"
-}
--}
-
--- | Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
---
--- No validity checking is performed on the indices of A.  However, the following
--- input format is recommended for optimal behavior:
--- 
--- if adjoint_a == false:
---   A should be sorted in lexicographically increasing order.  Use SparseReorder
---   if you're not sure.
--- if adjoint_a == true:
---   A should be sorted in order of increasing dimension 1 (i.e., "column major"
---   order instead of "row major" order).
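---
--- A minimal sketch of the recommended pattern (hedged: illustrative only),
--- reordering A first so the `adjoint_a == false` layout requirement holds:
---
--- ```haskell
--- safeMatMul :: Tensor v1 Data.Int.Int64 -> Tensor v2 Float
---            -> Tensor v3 Data.Int.Int64 -> Tensor v4 Float
---            -> Tensor Value Float
--- safeMatMul aIndices aValues aShape b =
---     let (ix, vals) = sparseReorder aIndices aValues aShape
---     in sparseTensorDenseMatMul ix vals aShape b
--- ```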
-sparseTensorDenseMatMul :: forall v1 v2 v3 v4 t . (TensorType t) =>
-                           Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
-                           -> Tensor v2 t -- ^ __a_values__: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
-                           -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
-                           -> Tensor v4 t -- ^ __b__: 2-D.  A dense Matrix.
-                           -> Tensor Value t -- ^ __product__
-sparseTensorDenseMatMul a_indices a_values a_shape b | eqLengthGuard [] =
-    buildOp (opDef "SparseTensorDenseMatMul"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a_indices a_values a_shape b
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { b: false }
-  description: "Use the adjoint of A in the matrix multiply.  If A is complex, this\nis transpose(conj(A)).  Otherwise it\'s transpose(A)."
-  name: "adjoint_a"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "Use the adjoint of B in the matrix multiply.  If B is complex, this\nis transpose(conj(B)).  Otherwise it\'s transpose(B)."
-  name: "adjoint_b"
-  type: "bool"
-}
-input_arg {
-  description: "2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix."
-  name: "a_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector."
-  name: "a_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector."
-  name: "a_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "2-D.  A dense Matrix." name: "b" type_attr: "T"
-}
-output_arg { name: "product" type_attr: "T" }
--}
-
--- | Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
---
--- This operation folds the padded areas of `input` by `MirrorPad` according to the
--- `paddings` you specify. `paddings` must be the same as `paddings` argument
--- given to the corresponding `MirrorPad` op.
--- 
--- The folded size of each dimension D of the output is:
--- 
--- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
--- 
--- For example:
--- 
--- ```prettyprint
--- # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
--- # 'paddings' is [[0, 1], [0, 1]].
--- # 'mode' is SYMMETRIC.
--- # rank of 't' is 2.
--- pad(t, paddings) ==> [[ 1,  5]
---                       [11, 28]]
--- ```
-mirrorPadGrad :: forall v1 v2 t tpaddings . (TensorType t, TensorType tpaddings,
-                                             OneOf '[Data.Int.Int32,
-                                                     Data.Int.Int64] tpaddings) =>
-                 Tensor v1 t -- ^ __input__: The input tensor to be folded.
-                 -> Tensor v2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
-                                        -- rows must be the same as the rank of `input`.
-                 -> Tensor Value t -- ^ __output__: The folded tensor.
-mirrorPadGrad input paddings | eqLengthGuard [] =
-    buildOp (opDef "MirrorPadGrad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings))
-        input paddings
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tpaddings"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
-  description: "The mode used in the `MirrorPad` op."
-  name: "mode"
-  type: "string"
-}
-input_arg {
-  description: "The input tensor to be folded."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
-  name: "paddings"
-  type_attr: "Tpaddings"
-}
-output_arg {
-  description: "The folded tensor." name: "output" type_attr: "T"
-}
--}
-
--- | Randomly shuffles a tensor along its first dimension.
---
---   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
---   to one and only one `output[i]`. For example, a mapping that might occur for a
---   3x2 tensor is:
--- 
--- ```prettyprint
--- [[1, 2],       [[5, 6],
---  [3, 4],  ==>   [1, 2],
---  [5, 6]]        [3, 4]]
--- ```
-randomShuffle :: forall v1 t . (TensorType t) =>
-                 Tensor v1 t -- ^ __value__: The tensor to be shuffled.
-                 -> Build (Tensor Value t) -- ^ __output__: A tensor of same shape and type as `value`, shuffled along its first
-                 -- dimension.
-randomShuffle value | eqLengthGuard [] =
-    buildOp (opDef "RandomShuffle"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        value
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The tensor to be shuffled."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "A tensor of same shape and type as `value`, shuffled along its first\ndimension."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Selects elements from `t` or `e`, depending on `condition`.
---
--- The `t`, and `e` tensors must all have the same shape, and the
--- output will also have that shape.
--- 
--- The `condition` tensor must be a scalar if `t` and `e` are scalars.
--- If `t` and `e` are vectors or higher rank, then `condition` must be either a
--- scalar, a vector with size matching the first dimension of `t`, or must have
--- the same shape as `t`.
--- 
--- The `condition` tensor acts as a mask that chooses, based on the value at each
--- element, whether the corresponding element / row in the output should be
--- taken from `t` (if true) or `e` (if false).
--- 
--- If `condition` is a vector and `t` and `e` are higher rank matrices, then
--- it chooses which row (outer dimension) to copy from `t` and `e`.
--- If `condition` has the same shape as `t` and `e`, then it chooses which
--- element to copy from `t` and `e`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 'condition' tensor is [[True,  False]
--- #                        [False, True]]
--- # 't' is [[1, 2],
--- #         [3, 4]]
--- # 'e' is [[5, 6],
--- #         [7, 8]]
--- select(condition, t, e) ==> [[1, 6],
---                              [7, 4]]
--- 
--- 
--- # 'condition' tensor is [True, False]
--- # 't' is [[1, 2],
--- #         [3, 4]]
--- # 'e' is [[5, 6],
--- #         [7, 8]]
--- select(condition, t, e) ==> [[1, 2],
---                              [7, 8]]
--- 
--- ```
-select :: forall v1 v2 v3 t . (TensorType t) =>
-          Tensor v1 Bool -- ^ __condition__
-          -> Tensor v2 t -- ^ __t__: = A `Tensor` which may have the same shape as `condition`.
-                         -- If `condition` is rank 1, `t` may have higher rank,
-                         -- but its first dimension must match the size of `condition`.
-          -> Tensor v3 t -- ^ __e__: = A `Tensor` with the same type and shape as `t`.
-          -> Tensor Value t -- ^ __output__: = A `Tensor` with the same type and shape as `t` and `e`.
-select condition t e | eqLengthGuard [] =
-    buildOp (opDef "Select"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        condition t e
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "condition" type: DT_BOOL }
-input_arg {
-  description: "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`."
-  name: "t"
-  type_attr: "T"
-}
-input_arg {
-  description: "= A `Tensor` with the same type and shape as `t`."
-  name: "e"
-  type_attr: "T"
-}
-output_arg {
-  description: "= A `Tensor` with the same type and shape as `t` and `e`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | The gradient operator for the SparseAdd op.
---
--- The SparseAdd op calculates A + B, where A, B, and the sum are all represented
--- as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
--- non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
--- values of A and B.
-sparseAddGrad :: forall v1 v2 v3 v4 t . (TensorType t,
-                                         OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t) =>
-                 Tensor v1 t -- ^ __backprop_val_grad__: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
-                             -- the non-empty values of the sum.
-                 -> Tensor v2 Data.Int.Int64 -- ^ __a_indices__: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
-                 -> Tensor v3 Data.Int.Int64 -- ^ __b_indices__: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
-                 -> Tensor v4 Data.Int.Int64 -- ^ __sum_indices__: 2-D.  The `indices` of the sum `SparseTensor`, size
-                                             -- `[nnz(sum), ndims]`.
-                 -> (Tensor Value t, Tensor Value t)
-                 -- ^ (__a_val_grad__, __b_val_grad__)
-                 --
-                 -- * __a_val_grad__: 1-D with shape `[nnz(A)]`. The gradient with respect to the
-                 -- non-empty values of A.
-                 --
-                 -- * __b_val_grad__: 1-D with shape `[nnz(B)]`. The gradient with respect to the
-                 -- non-empty values of B.
-sparseAddGrad backprop_val_grad a_indices b_indices
-              sum_indices | eqLengthGuard [] =
-    buildOp (opDef "SparseAddGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        backprop_val_grad a_indices b_indices sum_indices
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "1-D with shape `[nnz(sum)]`.  The gradient with respect to\nthe non-empty values of the sum."
-  name: "backprop_val_grad"
-  type_attr: "T"
-}
-input_arg {
-  description: "2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`."
-  name: "a_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`."
-  name: "b_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "2-D.  The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`."
-  name: "sum_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D with shape `[nnz(A)]`. The gradient with respect to the\nnon-empty values of A."
-  name: "a_val_grad"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B."
-  name: "b_val_grad"
-  type_attr: "T"
-}
--}
-
--- | Computes fingerprints of the input strings.
-
-sdcaFprint :: Tensor v1 Data.ByteString.ByteString -- ^ __input__: vector of strings to compute fingerprints on.
-              -> Tensor Value Data.Int.Int64 -- ^ __output__: a (N,2) shaped matrix where N is the number of elements in the input
-              -- vector. Each row contains the low and high parts of the fingerprint.
-sdcaFprint input | eqLengthGuard [] =
-    buildOp (opDef "SdcaFprint")
-        input
-{-
-input_arg {
-  description: "vector of strings to compute fingerprints on."
-  name: "input"
-  type: DT_STRING
-}
-output_arg {
-  description: "a (N,2) shaped matrix where N is the number of elements in the input\nvector. Each row contains the low and high parts of the fingerprint."
-  name: "output"
-  type: DT_INT64
-}
--}
-
--- | 
-
-tensorArrayUnpack :: forall v2 v3 t . (TensorType t) =>
-                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                     -> Tensor v2 t -- ^ __value__
-                     -> Tensor v3 Float -- ^ __flow_in__
-                     -> Build (Tensor Value Float) -- ^ __flow_out__
-tensorArrayUnpack handle value flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayUnpack"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle value flow_in
-{-
-attr { name: "T" type: "type" }
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "value" type_attr: "T" }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "flow_out" type: DT_FLOAT }
--}
-
--- | Produces the average pool of the input tensor for quantized types.
-
-quantizedAvgPool :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                               Data.Int.Int32,
-                                                               Data.Word.Word16,
-                                                               Data.Word.Word8] t) =>
-                    Tensor v1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`.
-                    -> Tensor v2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents.
-                    -> Tensor v3 Float -- ^ __max_input__: The float value that the highest quantized input value represents.
-                    -> (Tensor Value t, Tensor Value Float, Tensor Value Float)
-                    -- ^ (__output__, __min_output__, __max_output__)
-                    --
-                    -- * __output__
-                    --
-                    -- * __min_output__: The float value that the lowest quantized output value represents.
-                    --
-                    -- * __max_output__: The float value that the highest quantized output value represents.
-quantizedAvgPool input min_input max_input | eqLengthGuard [] =
-    buildOp (opDef "QuantizedAvgPool"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input min_input max_input
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input."
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the input\ntensor.  The length must be 4 to match the number of dimensions of the input."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "The float value that the lowest quantized input value represents."
-  name: "min_input"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized input value represents."
-  name: "max_input"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type_attr: "T" }
-output_arg {
-  description: "The float value that the lowest quantized output value represents."
-  name: "min_output"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized output value represents."
-  name: "max_output"
-  type: DT_FLOAT
-}
--}
-
--- | Adjust the contrast of one or more images.
---
--- `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
--- interpreted as `[height, width, channels]`.  The other dimensions only
--- represent a collection of images, such as `[batch, height, width, channels].`
--- 
--- Contrast is adjusted independently for each channel of each image.
--- 
--- For each channel, the Op first computes the mean of the image pixels in the
--- channel and then adjusts each component of each pixel to
--- `(x - mean) * contrast_factor + mean`.
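---
--- For example (illustrative numbers): a channel with pixels `[0.2, 0.6]`
--- has mean `0.4`; with `contrast_factor = 2.0` the pixels become
--- `[(0.2 - 0.4) * 2 + 0.4, (0.6 - 0.4) * 2 + 0.4] = [0.0, 0.8]`.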
-adjustContrastv2 :: Tensor v1 Float -- ^ __images__: Images to adjust.  At least 3-D.
-                    -> Tensor v2 Float -- ^ __contrast_factor__: A float multiplier for adjusting contrast.
-                    -> Tensor Value Float -- ^ __output__: The contrast-adjusted image or images.
-adjustContrastv2 images contrast_factor | eqLengthGuard [] =
-    buildOp (opDef "AdjustContrastv2")
-        images contrast_factor
-{-
-input_arg {
-  description: "Images to adjust.  At least 3-D."
-  name: "images"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A float multiplier for adjusting contrast."
-  name: "contrast_factor"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The contrast-adjusted image or images."
-  name: "output"
-  type: DT_FLOAT
-}
--}
-
--- | Gather slices from the variable pointed to by `resource` according to `indices`.
---
--- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
--- Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
--- 
--- ```python
---     # Scalar indices
---     output[:, ..., :] = params[indices, :, ... :]
--- 
---     # Vector indices
---     output[i, :, ..., :] = params[indices[i], :, ... :]
--- 
---     # Higher rank indices
---     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
--- ```
-resourceGather :: forall v2 dtype tindices . (TensorType dtype,
-                                              TensorType tindices,
-                                              OneOf '[Data.Int.Int32,
-                                                      Data.Int.Int64] tindices) =>
-                  ResourceHandle dtype -- ^ __resource__
-                  -> Tensor v2 tindices -- ^ __indices__
-                  -> Build (Tensor Value dtype) -- ^ __output__
-resourceGather resource indices | eqLengthGuard [] =
-    buildOp (opDef "ResourceGather"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        resource indices
-{-
-attr {
-  default_value { b: true } name: "validate_indices" type: "bool"
-}
-attr { name: "dtype" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "resource" type: DT_RESOURCE }
-input_arg { name: "indices" type_attr: "Tindices" }
-output_arg { name: "output" type_attr: "dtype" }
--}
-
--- | Merges summaries.
---
--- This op creates a
--- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
--- protocol buffer that contains the union of all the values in the input
--- summaries.
--- 
--- When the Op is run, it reports an `InvalidArgument` error if multiple values
--- in the summaries to merge use the same tag.
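---
--- A minimal call sketch (hedged: `s1` and `s2` stand for serialized
--- `Summary` tensors produced elsewhere, e.g. by the scalar or histogram
--- summary ops):
---
--- ```haskell
--- merged :: Tensor Value Data.ByteString.ByteString
---        -> Tensor Value Data.ByteString.ByteString
---        -> Tensor Value Data.ByteString.ByteString
--- merged s1 s2 = mergeSummary [s1, s2]
--- ```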
-mergeSummary :: [Tensor v1 Data.ByteString.ByteString] -- ^ __inputs__: Can be of any shape.  Each must contain serialized `Summary` protocol
-                                                       -- buffers.
-                -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
-mergeSummary inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
-    buildOp (opDef "MergeSummary"
-             & opAttr "N" .~ n)
-        inputs
-  where
-    n = fromIntegral (length inputs) :: Int64
-{-
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-input_arg {
-  description: "Can be of any shape.  Each must contain serialized `Summary` protocol\nbuffers."
-  name: "inputs"
-  number_attr: "N"
-  type: DT_STRING
-}
-output_arg {
-  description: "Scalar. Serialized `Summary` protocol buffer."
-  name: "summary"
-  type: DT_STRING
-}
--}
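-
--- A minimal usage sketch (hypothetical; assumes two serialized summary
--- tensors `s1` and `s2` of type `Tensor v Data.ByteString.ByteString` are
--- already in scope):
---
--- ```haskell
--- merged = mergeSummary [s1, s2]
--- ```
---
--- The `N` attribute is derived from the length of the input list, so no
--- count is passed explicitly.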
-
--- | Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.
-
-serializeSparse :: forall v1 v2 v3 t . (TensorType t) =>
-                   Tensor v1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the `SparseTensor`.
-                   -> Tensor v2 t -- ^ __sparse_values__: 1-D.  The `values` of the `SparseTensor`.
-                   -> Tensor v3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the `SparseTensor`.
-                   -> Tensor Value Data.ByteString.ByteString -- ^ __serialized_sparse__
-serializeSparse sparse_indices sparse_values sparse_shape | eqLengthGuard [] =
-    buildOp (opDef "SerializeSparse"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sparse_indices sparse_values sparse_shape
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "2-D.  The `indices` of the `SparseTensor`."
-  name: "sparse_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  The `values` of the `SparseTensor`."
-  name: "sparse_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the `SparseTensor`."
-  name: "sparse_shape"
-  type: DT_INT64
-}
-output_arg { name: "serialized_sparse" type: DT_STRING }
--}
-
--- | Training via negative sampling.
-
-negTrain :: Data.Int.Int64 -- ^ __num_negative_samples__: Number of negative samples per example.
-            -> Tensor Ref Float -- ^ __w_in__: input word embedding.
-            -> Tensor Ref Float -- ^ __w_out__: output word embedding.
-            -> Tensor v3 Data.Int.Int32 -- ^ __examples__: A vector of word ids.
-            -> Tensor v4 Data.Int.Int32 -- ^ __labels__: A vector of word ids.
-            -> Tensor v5 Float -- ^ __lr__
-            -> Build (ControlNode)
-negTrain num_negative_samples w_in w_out examples labels lr | eqLengthGuard [] =
-    buildOp (opDef "NegTrain"
-             & opAttr "num_negative_samples" .~ num_negative_samples)
-        w_in w_out examples labels lr
-{-
-attr {
-  description: "Count of words in the vocabulary."
-  name: "vocab_count"
-  type: "list(int)"
-}
-attr {
-  description: "Number of negative samples per example."
-  name: "num_negative_samples"
-  type: "int"
-}
-input_arg {
-  description: "input word embedding."
-  is_ref: true
-  name: "w_in"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "output word embedding."
-  is_ref: true
-  name: "w_out"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A vector of word ids."
-  name: "examples"
-  type: DT_INT32
-}
-input_arg {
-  description: "A vector of word ids." name: "labels" type: DT_INT32
-}
-input_arg { name: "lr" type: DT_FLOAT }
--}
-
--- | Delete the TensorArray from its resource container.  This enables
---
--- the user to close and release the resource in the middle of a step/run.
-tensorArrayCloseV2 :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
-                      -> ControlNode
-tensorArrayCloseV2 handle | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayCloseV2")
-        handle
-{-
-input_arg {
-  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Generates labels for candidate sampling with a learned unigram distribution.
---
--- See explanations of candidate sampling and the data formats at
--- go/candidate-sampling.
--- 
--- For each batch, this op picks a single set of sampled candidate labels.
--- 
--- The advantages of sampling candidates per-batch are simplicity and the
--- possibility of efficient dense matrix multiplication. The disadvantage is that
--- the sampled candidates must be chosen independently of the context and of the
--- true labels.
-threadUnsafeUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
-                                       -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
-                                       -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
-                                       -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
-                                               -- candidates in a batch are unique. This requires some approximation to
-                                               -- estimate the post-rejection sampling probabilities.
-                                       -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
-                                                                   -- IDs of the num_true target_classes in the corresponding original label.
-                                       -> (Tensor Value Data.Int.Int64,
-                                           Tensor Value Float,
-                                           Tensor Value Float)
-                                       -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
-                                       --
-                                       -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
-                                       -- the ID of a sampled candidate.
-                                       --
-                                       -- * __true_expected_count__: A batch_size * num_true matrix, representing
-                                       -- the number of times each candidate is expected to occur in a batch
-                                       -- of sampled candidates. If unique=true, then this is a probability.
-                                       --
-                                       -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
-                                       -- candidate representing the number of times the candidate is expected
-                                       -- to occur in a batch of sampled candidates.  If unique=true, then this is a
-                                       -- probability.
-threadUnsafeUnigramCandidateSampler num_sampled num_true range_max unique
-                                    true_classes | eqLengthGuard [] =
-    buildOp (opDef "ThreadUnsafeUnigramCandidateSampler"
-             & opAttr "num_sampled" .~ num_sampled
-             & opAttr "num_true" .~ num_true
-             & opAttr "range_max" .~ range_max
-             & opAttr "unique" .~ unique)
-        true_classes
-{-
-attr {
-  description: "Number of true labels per context."
-  has_minimum: true
-  minimum: 1
-  name: "num_true"
-  type: "int"
-}
-attr {
-  description: "Number of candidates to randomly sample per batch."
-  has_minimum: true
-  minimum: 1
-  name: "num_sampled"
-  type: "int"
-}
-attr {
-  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
-  name: "unique"
-  type: "bool"
-}
-attr {
-  description: "The sampler will sample integers from the interval [0, range_max)."
-  has_minimum: true
-  minimum: 1
-  name: "range_max"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
-  name: "true_classes"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
-  name: "sampled_candidates"
-  type: DT_INT64
-}
-output_arg {
-  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
-  name: "true_expected_count"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
-  name: "sampled_expected_count"
-  type: DT_FLOAT
-}
--}
-
--- | Converts each string in the input Tensor to the specified numeric type.
---
--- (Note that int32 overflow results in an error while float overflow
--- results in a rounded value.)
-stringToNumber :: forall v1 out_type . (TensorType out_type,
-                                        OneOf '[Data.Int.Int32,
-                                                Float] out_type) =>
-                  Tensor v1 Data.ByteString.ByteString -- ^ __string_tensor__
-                  -> Tensor Value out_type -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
-stringToNumber string_tensor | eqLengthGuard [] =
-    buildOp (opDef "StringToNumber"
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        string_tensor
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_INT32 } }
-  default_value { type: DT_FLOAT }
-  description: "The numeric type to interpret each string in `string_tensor` as."
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "string_tensor" type: DT_STRING }
-output_arg {
-  description: "A Tensor of the same shape as the input `string_tensor`."
-  name: "output"
-  type_attr: "out_type"
-}
--}
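-
--- Since `out_type` appears only in the result, callers typically pin it down
--- with a type annotation. A hypothetical sketch (assuming `strs` is a
--- `Tensor v Data.ByteString.ByteString` in scope):
---
--- ```haskell
--- parsed = stringToNumber strs :: Tensor Value Data.Int.Int32
--- ```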
-
--- | Performs beam search decoding on the logits given in input.
---
--- A note about the attribute merge_repeated: For the beam search decoder,
--- this means that if consecutive entries in a beam are the same, only
--- the first of these is emitted.  That is, when the top path is "A B B B B",
--- "A B" is returned if merge_repeated = True but "A B B B B" is
--- returned if merge_repeated = False.
-cTCBeamSearchDecoder :: Data.Int.Int64 -- ^ __beam_width__: A scalar >= 0 (beam search beam width).
-                        -> Data.Int.Int64 -- ^ __top_paths__: A scalar >= 0, <= beam_width (controls output size).
-                        -> Tensor v1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-                        -> Tensor v2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch)`.
-                        -> ([Tensor Value Data.Int.Int64],
-                            [Tensor Value Data.Int.Int64],
-                            [Tensor Value Data.Int.Int64], Tensor Value Float)
-                        -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
-                        --
-                        -- * __decoded_indices__: A list (length: top_paths) of indices matrices.  Matrix j,
-                        -- size `(total_decoded_outputs[j] x 2)`, has indices of a
-                        -- `SparseTensor<int64, 2>`.  The rows store: [batch, time].
-                        --
-                        -- * __decoded_values__: A list (length: top_paths) of values vectors.  Vector j,
-                        -- size `(length total_decoded_outputs[j])`, has the values of a
-                        -- `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
-                        --
-                        -- * __decoded_shape__: A list (length: top_paths) of shape vectors.  Vector j,
-                        -- size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
-                        -- Its values are: `[batch_size, max_decoded_length[j]]`.
-                        --
-                        -- * __log_probability__: A matrix, shaped: `(batch_size x top_paths)`.  The
-                        -- sequence log-probabilities.
-cTCBeamSearchDecoder beam_width top_paths inputs
-                     sequence_length | eqLengthGuard [] =
-    buildListOp [top_paths, top_paths, top_paths] (opDef "CTCBeamSearchDecoder"
-                                                   & opAttr "beam_width" .~ beam_width
-                                                   & opAttr "top_paths" .~ top_paths)
-        inputs sequence_length
-{-
-attr {
-  description: "A scalar >= 0 (beam search beam width)."
-  has_minimum: true
-  minimum: 1
-  name: "beam_width"
-  type: "int"
-}
-attr {
-  description: "A scalar >= 0, <= beam_width (controls output size)."
-  has_minimum: true
-  minimum: 1
-  name: "top_paths"
-  type: "int"
-}
-attr {
-  default_value { b: true }
-  description: "If true, merge repeated classes in output."
-  name: "merge_repeated"
-  type: "bool"
-}
-input_arg {
-  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
-  name: "inputs"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A vector containing sequence lengths, size `(batch)`."
-  name: "sequence_length"
-  type: DT_INT32
-}
-output_arg {
-  description: "A list (length: top_paths) of indices matrices.  Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor<int64, 2>`.  The rows store: [batch, time]."
-  name: "decoded_indices"
-  number_attr: "top_paths"
-  type: DT_INT64
-}
-output_arg {
-  description: "A list (length: top_paths) of values vectors.  Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j."
-  name: "decoded_values"
-  number_attr: "top_paths"
-  type: DT_INT64
-}
-output_arg {
-  description: "A list (length: top_paths) of shape vector.  Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`."
-  name: "decoded_shape"
-  number_attr: "top_paths"
-  type: DT_INT64
-}
-output_arg {
-  description: "A matrix, shaped: `(batch_size x top_paths)`.  The\nsequence log-probabilities."
-  name: "log_probability"
-  type: DT_FLOAT
-}
--}
-
--- | Transforms a serialized tensorflow.TensorProto proto into a Tensor.
-
-parseTensor :: forall v1 out_type . (TensorType out_type) =>
-               Tensor v1 Data.ByteString.ByteString -- ^ __serialized__: A scalar string containing a serialized TensorProto proto.
-               -> Tensor Value out_type -- ^ __output__: A Tensor of type `out_type`.
-parseTensor serialized | eqLengthGuard [] =
-    buildOp (opDef "ParseTensor"
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        serialized
-{-
-attr {
-  description: "The type of the serialized tensor.  The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place."
-  name: "out_type"
-  type: "type"
-}
-input_arg {
-  description: "A scalar string containing a serialized TensorProto proto."
-  name: "serialized"
-  type: DT_STRING
-}
-output_arg {
-  description: "A Tensor of type `out_type`."
-  name: "output"
-  type_attr: "out_type"
-}
--}
-
--- | Outputs a `Summary` protocol buffer with images.
---
--- The summary has up to `max_images` summary values containing images. The
--- images are built from `tensor` which must be 4-D with shape `[batch_size,
--- height, width, channels]` and where `channels` can be:
--- 
--- *  1: `tensor` is interpreted as Grayscale.
--- *  3: `tensor` is interpreted as RGB.
--- *  4: `tensor` is interpreted as RGBA.
--- 
--- The images have the same number of channels as the input tensor. For float
--- input, the values are normalized one image at a time to fit in the range
--- `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
--- normalization algorithms:
--- 
--- *  If the input values are all positive, they are rescaled so the largest one
---    is 255.
--- 
--- *  If any input value is negative, the values are shifted so input value 0.0
---    is at 127.  They are then rescaled so that either the smallest value is 0,
---    or the largest one is 255.
--- 
--- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
--- build the `tag` of the summary values:
--- 
--- *  If `max_images` is 1, the summary value tag is '*tag*/image'.
--- *  If `max_images` is greater than 1, the summary value tags are
---    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
--- 
--- The `bad_color` argument is the color to use in the generated images for
--- non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
--- Each element must be in the range `[0, 255]` (It represents the value of a
--- pixel in the output image).  Non-finite values in the input tensor are
--- replaced by this tensor in the output image.  The default value is the color
--- red.
-imageSummary :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16,
-                                                        Data.Word.Word8,
-                                                        Float] t) =>
-                Tensor v1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
-                -> Tensor v2 t -- ^ __tensor__: 4-D of shape `[batch_size, height, width, channels]` where
-                               -- `channels` is 1, 3, or 4.
-                -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
-imageSummary tag tensor | eqLengthGuard [] =
-    buildOp (opDef "ImageSummary"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        tag tensor
-{-
-attr {
-  default_value { i: 3 }
-  description: "Max number of batch elements to generate images for."
-  has_minimum: true
-  minimum: 1
-  name: "max_images"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF }
-  }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value {
-    tensor {
-      dtype: DT_UINT8
-      int_val: 255
-      int_val: 0
-      int_val: 0
-      int_val: 255
-      tensor_shape { dim { size: 4 } }
-    }
-  }
-  description: "Color to use for pixels with non-finite values."
-  name: "bad_color"
-  type: "tensor"
-}
-input_arg {
-  description: "Scalar. Used to build the `tag` attribute of the summary values."
-  name: "tag"
-  type: DT_STRING
-}
-input_arg {
-  description: "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4."
-  name: "tensor"
-  type_attr: "T"
-}
-output_arg {
-  description: "Scalar. Serialized `Summary` protocol buffer."
-  name: "summary"
-  type: DT_STRING
-}
--}
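-
--- To illustrate the two normalization paths: all-positive float input
--- `[0.5, 1.0]` is rescaled so the largest value becomes 255, giving
--- `[127.5, 255]`; an input containing a negative value, such as
--- `[-1.0, 0.0, 1.0]`, is first shifted so `0.0` lands at 127 and then
--- rescaled until one endpoint reaches 0 or 255.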
-
--- | Returns x / y element-wise for integer types.
---
--- Truncation designates that negative numbers will round fractional quantities
--- toward zero, i.e. -7 / 5 = -1. This matches C semantics but differs from
--- Python semantics. See `FloorDiv` for a division function that matches
--- Python semantics.
--- 
--- *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-truncateDiv :: forall v1 v2 t . (TensorType t,
-                                 OneOf '[(Data.Complex.Complex Double),
-                                         (Data.Complex.Complex Float),
-                                         Data.Int.Int16, Data.Int.Int32,
-                                         Data.Int.Int64, Data.Int.Int8,
-                                         Data.Word.Word16, Data.Word.Word8,
-                                         Double, Float] t) =>
-               Tensor v1 t -- ^ __x__
-               -> Tensor v2 t -- ^ __y__
-               -> Tensor Value t -- ^ __z__
-truncateDiv x y | eqLengthGuard [] =
-    buildOp (opDef "TruncateDiv"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
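-
--- For example, `-7 / 5 = -1.4`; truncation toward zero gives `-1`, whereas
--- `FloorDiv` would give `-2`. A hypothetical sketch (assuming `x` and `y`
--- are `Tensor v Data.Int.Int32` values in scope):
---
--- ```haskell
--- z = truncateDiv x y
--- ```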
-
--- | Computes the Cholesky decomposition of one or more square matrices.
---
--- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
--- form square matrices, with the same constraints as the single matrix Cholesky
--- decomposition above. The output is a tensor of the same shape as the input
--- containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
-cholesky :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-            Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
-            -> Tensor Value t -- ^ __output__: Shape is `[..., M, M]`.
-cholesky input | eqLengthGuard [] =
-    buildOp (opDef "Cholesky"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "Shape is `[..., M, M]`."
-  name: "output"
-  type_attr: "T"
-}
--}
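-
--- A small worked instance: the single matrix `[[4, 2], [2, 3]]` factors as
--- `[[2, 0], [1, sqrt 2]]`, since multiplying that lower-triangular factor by
--- its transpose reproduces the input.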
-
--- | 
-
-batchMatrixSolveLs :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double,
-                                                                 Float] t) =>
-                      Tensor v1 t -- ^ __matrix__
-                      -> Tensor v2 t -- ^ __rhs__
-                      -> Tensor v3 Double -- ^ __l2_regularizer__
-                      -> Tensor Value t -- ^ __output__
-batchMatrixSolveLs matrix rhs l2_regularizer | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixSolveLs"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        matrix rhs l2_regularizer
-{-
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-attr { default_value { b: true } name: "fast" type: "bool" }
-input_arg { name: "matrix" type_attr: "T" }
-input_arg { name: "rhs" type_attr: "T" }
-input_arg { name: "l2_regularizer" type: DT_DOUBLE }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Outputs all keys and values in the table.
-
-lookupTableExport :: forall tkeys tvalues . (TensorType tkeys,
-                                             TensorType tvalues) =>
-                     Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
-                     -> Build ((Tensor Value tkeys, Tensor Value tvalues))
-                     -- ^ (__keys__, __values__)
-                     --
-                     -- * __keys__: Vector of all keys present in the table.
-                     --
-                     -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`.
-lookupTableExport table_handle | eqLengthGuard [] =
-    buildOp (opDef "LookupTableExport"
-             & opAttr "Tkeys" .~ tensorType (undefined :: tkeys)
-             & opAttr "Tvalues" .~ tensorType (undefined :: tvalues))
-        table_handle
-{-
-attr { name: "Tkeys" type: "type" }
-attr { name: "Tvalues" type: "type" }
-input_arg {
-  description: "Handle to the table."
-  is_ref: true
-  name: "table_handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "Vector of all keys present in the table."
-  name: "keys"
-  type_attr: "Tkeys"
-}
-output_arg {
-  description: "Tensor of all values in the table. Indexed in parallel with `keys`."
-  name: "values"
-  type_attr: "Tvalues"
-}
--}
-
--- | 
-
-batchSvd :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Double, Float] t) =>
-            Tensor v1 t -- ^ __input__
-            -> (Tensor Value t, Tensor Value t, Tensor Value t)
-            -- ^ (__s__, __u__, __v__)
-            --
-            -- * __s__
-            --
-            -- * __u__
-            --
-            -- * __v__
-batchSvd input | eqLengthGuard [] =
-    buildOp (opDef "BatchSvd"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { default_value { b: true } name: "compute_uv" type: "bool" }
-attr {
-  default_value { b: false } name: "full_matrices" type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_DOUBLE
-      type: DT_FLOAT
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "s" type_attr: "T" }
-output_arg { name: "u" type_attr: "T" }
-output_arg { name: "v" type_attr: "T" }
--}
-
--- | Resize `images` to `size` using bicubic interpolation.
---
--- Input images can be of different types but output images are always float.
-resizeBicubic :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Int.Int64,
-                                                         Data.Int.Int8,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8,
-                                                         Double, Float] t) =>
-                 Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
-                 -> Tensor v2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
-                                             -- new size for the images.
-                 -> Tensor Value Float -- ^ __resized_images__: 4-D with shape
-                 -- `[batch, new_height, new_width, channels]`.
-resizeBicubic images size | eqLengthGuard [] =
-    buildOp (opDef "ResizeBicubic"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images size
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
-  name: "align_corners"
-  type: "bool"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "images"
-  type_attr: "T"
-}
-input_arg {
-  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
-  name: "size"
-  type: DT_INT32
-}
-output_arg {
-  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
-  name: "resized_images"
-  type: DT_FLOAT
-}
--}
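-
--- To make the `align_corners` attribute concrete: resizing height 4 to
--- height 8 rescales by `new_height / height = 2` by default, but by
--- `(new_height - 1) / (height - 1) = 7/3` when `align_corners` is true, so
--- the corner pixels of the input and output coincide exactly.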
-
--- | Convert one or more images from HSV to RGB.
---
--- Outputs a tensor of the same shape as the `images` tensor, containing the RGB
--- value of the pixels. The output is only well defined if the values in `images`
--- are in `[0,1]`.
--- 
--- See `rgb_to_hsv` for a description of the HSV encoding.
-hSVToRGB :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-            Tensor v1 t -- ^ __images__: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
-            -> Tensor Value t -- ^ __output__: `images` converted to RGB.
-hSVToRGB images | eqLengthGuard [] =
-    buildOp (opDef "HSVToRGB"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "1-D or higher rank. HSV data to convert. Last dimension must be size 3."
-  name: "images"
-  type_attr: "T"
-}
-output_arg {
-  description: "`images` converted to RGB."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Performs 3D average pooling on the input.
-
-avgPool3D :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t) =>
-             Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
-             -> Tensor Value t -- ^ __output__: The average pooled output tensor.
-avgPool3D input | eqLengthGuard [] =
-    buildOp (opDef "AvgPool3D"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "The average pooled output tensor."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Delete the stack from its resource container.
-
-stackClose :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack.
-              -> Build (ControlNode)
-stackClose handle | eqLengthGuard [] =
-    buildOp (opDef "StackClose")
-        handle
-{-
-input_arg {
-  description: "The handle to a stack."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Assigns a new value to a variable.
---
--- Any ReadVariableOp with a control dependency on this op is guaranteed to return
--- this value or a subsequent newer value of the variable.
-assignVariableOp :: forall v2 dtype . (TensorType dtype) =>
-                    ResourceHandle dtype -- ^ __resource__: handle to the resource in which to store the variable.
-                    -> Tensor v2 dtype -- ^ __value__: the value to set the new tensor to use.
-                    -> Build (ControlNode)
-assignVariableOp resource value | eqLengthGuard [] =
-    buildOp (opDef "AssignVariableOp"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        resource value
-{-
-attr {
-  description: "the dtype of the value." name: "dtype" type: "type"
-}
-input_arg {
-  description: "handle to the resource in which to store the variable."
-  name: "resource"
-  type: DT_RESOURCE
-}
-input_arg {
-  description: "the value to set the new tensor to use."
-  name: "value"
-  type_attr: "dtype"
-}
--}
-
--- | Local Response Normalization.
---
--- The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
--- dimension), and each vector is normalized independently.  Within a given vector,
--- each component is divided by the weighted, squared sum of inputs within
--- `depth_radius`.  In detail,
--- 
---     sqr_sum[a, b, c, d] =
---         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
---     output = input / (bias + alpha * sqr_sum) ** beta
--- 
--- For details, see [Krizhevsky et al., ImageNet classification with deep
--- convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
-lRN :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Float] t) =>
-       Tensor v1 t -- ^ __input__: 4-D.
-       -> Tensor Value t -- ^ __output__
-lRN input | eqLengthGuard [] =
-    buildOp (opDef "LRN"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  default_value { i: 5 }
-  description: "0-D.  Half-width of the 1-D normalization window."
-  name: "depth_radius"
-  type: "int"
-}
-attr {
-  default_value { f: 1.0 }
-  description: "An offset (usually positive to avoid dividing by 0)."
-  name: "bias"
-  type: "float"
-}
-attr {
-  default_value { f: 1.0 }
-  description: "A scale factor, usually positive."
-  name: "alpha"
-  type: "float"
-}
-attr {
-  default_value { f: 0.5 }
-  description: "An exponent."
-  name: "beta"
-  type: "float"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg { description: "4-D." name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
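-
--- With the default `depth_radius = 5`, each component is normalized over a
--- window of `2 * 5 + 1 = 11` neighboring channels (truncated at the tensor
--- boundaries, per the sum above).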
-
--- | Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
---
--- The Hurwitz zeta function is defined as:
--- 
--- ```
--- \zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}
--- ```
-zeta :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
-        Tensor v1 t -- ^ __x__
-        -> Tensor v2 t -- ^ __q__
-        -> Tensor Value t -- ^ __z__
-zeta x q | eqLengthGuard [] =
-    buildOp (opDef "Zeta"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x q
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "q" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
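-
--- As a sanity check, \\(\zeta(x, 1)\\) reduces to the Riemann zeta function;
--- for instance \\(\zeta(2, 1) = \pi^2/6 \approx 1.6449\\).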
-
--- | Creates a TensorArray for storing the gradients of values in the given handle.
---
--- If the given TensorArray gradient already exists, returns a reference to it.
--- 
--- Locks the size of the original TensorArray by disabling its dynamic size flag.
--- 
--- **A note about the input flow_in:**
--- 
--- The handle flow_in forces the execution of the gradient lookup to occur
--- only after certain other operations have occurred.  For example, when
--- the forward TensorArray is dynamically sized, writes to this TensorArray
--- may resize the object.  The gradient TensorArray is statically sized based
--- on the size of the forward TensorArray when this operation executes.
--- Furthermore, the size of the forward TensorArray is frozen by this call.
--- As a result, the flow is used to ensure that the call to generate the gradient
--- TensorArray only happens after all writes are executed.
--- 
--- In the case of dynamically sized TensorArrays, gradient computation should
--- only be performed on read operations that have themselves been chained via
--- flow to occur only after all writes have executed. That way the final size
--- of the forward TensorArray is known when this operation is called.
--- 
--- **A note about the source attribute:**
--- 
--- TensorArray gradient calls use an accumulator TensorArray object.  If
--- multiple gradients are calculated and run in the same session, the multiple
--- gradient nodes may accidentally flow through the same accumulator TensorArray.
--- This double counts and generally breaks the TensorArray gradient flow.
--- 
--- The solution is to identify which gradient call this particular
--- TensorArray gradient is being called in.  This is performed by identifying
--- a unique string (e.g. "gradients", "gradients_1", ...) from the input
--- gradient Tensor's name.  This string is used as a suffix when creating
--- the TensorArray gradient object here (the attribute `source`).
--- 
--- The attribute `source` is added as a suffix to the forward TensorArray's
--- name when performing the creation / lookup, so that each separate gradient
--- calculation gets its own TensorArray accumulator.
-tensorArrayGradV2 :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to the forward TensorArray.
-                     -> Tensor v2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                     -> Build (Tensor Value Data.ByteString.ByteString) -- ^ __grad_handle__
-tensorArrayGradV2 handle flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayGradV2")
-        handle flow_in
-{-
-attr {
-  description: "The gradient source string, used to decide which gradient TensorArray\nto return."
-  name: "source"
-  type: "string"
-}
-input_arg {
-  description: "The handle to the forward TensorArray."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg { name: "grad_handle" type: DT_STRING }
--}
-
--- | Cast x of type SrcT to y of DstT.
-
-cast :: forall v1 srcT dstT . (TensorType srcT, TensorType dstT) =>
-        Tensor v1 srcT -- ^ __x__
-        -> Tensor Value dstT -- ^ __y__
-cast x | eqLengthGuard [] =
-    buildOp (opDef "Cast"
-             & opAttr "SrcT" .~ tensorType (undefined :: srcT)
-             & opAttr "DstT" .~ tensorType (undefined :: dstT))
-        x
-{-
-attr { name: "SrcT" type: "type" }
-attr { name: "DstT" type: "type" }
-input_arg { name: "x" type_attr: "SrcT" }
-output_arg { name: "y" type_attr: "DstT" }
--}
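-
--- Both `SrcT` and `DstT` are inferred, so a cast is usually pinned down with
--- a type annotation. A hypothetical sketch (assuming `xs` is a
--- `Tensor v Data.Int.Int32` in scope):
---
--- ```haskell
--- ys = cast xs :: Tensor Value Float
--- ```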
-
--- | Computes the Gauss error function of `x` element-wise.
-
-erf :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                            Float] t) => Tensor v1 t -- ^ __x__
-       -> Tensor Value t -- ^ __y__
-erf x | eqLengthGuard [] =
-    buildOp (opDef "Erf"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
--- | 
-
-batchMatrixTriangularSolve :: forall v1 v2 t . (TensorType t, OneOf '[Double,
-                                                                      Float] t) =>
-                              Tensor v1 t -- ^ __matrix__
-                              -> Tensor v2 t -- ^ __rhs__
-                              -> Tensor Value t -- ^ __output__
-batchMatrixTriangularSolve matrix rhs | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixTriangularSolve"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        matrix rhs
-{-
-attr { default_value { b: true } name: "lower" type: "bool" }
-attr { default_value { b: false } name: "adjoint" type: "bool" }
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "matrix" type_attr: "T" }
-input_arg { name: "rhs" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Adds sparse updates to the variable referenced by `resource`.
---
--- This operation computes
--- 
---     # Scalar indices
---     ref[indices, ...] += updates[...]
--- 
---     # Vector indices (for each i)
---     ref[indices[i], ...] += updates[i, ...]
--- 
---     # High rank indices (for each i, ..., j)
---     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
--- 
--- Duplicate entries are handled correctly: if multiple `indices` reference
--- the same location, their contributions add.
--- 
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/ScatterAdd.png" alt>
--- </div>
-resourceScatterAdd :: forall v2 v3 dtype tindices . (TensorType dtype,
-                                                     OneOf '[(Data.Complex.Complex Double),
-                                                             (Data.Complex.Complex Float),
-                                                             Data.Int.Int16,
-                                                             Data.Int.Int32,
-                                                             Data.Int.Int64,
-                                                             Data.Int.Int8,
-                                                             Data.Word.Word16,
-                                                             Data.Word.Word8,
-                                                             Double,
-                                                             Float] dtype,
-                                                     TensorType tindices,
-                                                     OneOf '[Data.Int.Int32,
-                                                             Data.Int.Int64] tindices) =>
-                      ResourceHandle dtype -- ^ __resource__: Should be from a `Variable` node.
-                      -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
-                      -> Tensor v3 dtype -- ^ __updates__: A tensor of updated values to add to `ref`.
-                      -> Build (ControlNode)
-resourceScatterAdd resource indices updates | eqLengthGuard [] =
-    buildOp (opDef "ResourceScatterAdd"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        resource indices updates
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "dtype"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  name: "resource"
-  type: DT_RESOURCE
-}
-input_arg {
-  description: "A tensor of indices into the first dimension of `ref`."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A tensor of updated values to add to `ref`."
-  name: "updates"
-  type_attr: "dtype"
-}
--}
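-
--- The duplicate-handling rule in action: with `indices = [0, 0]` and
--- `updates = [1, 2]`, both contributions accumulate, so `ref[0]` grows by 3
--- rather than by either 1 or 2 alone.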
-
--- | 
-
-batchCholeskyGrad :: forall v1 v2 t . (TensorType t, OneOf '[Double,
-                                                             Float] t) =>
-                     Tensor v1 t -- ^ __l__
-                     -> Tensor v2 t -- ^ __grad__
-                     -> Tensor Value t -- ^ __output__
-batchCholeskyGrad l grad | eqLengthGuard [] =
-    buildOp (opDef "BatchCholeskyGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        l grad
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "l" type_attr: "T" }
-input_arg { name: "grad" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | 
-
-batchMatrixInverse :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-                      Tensor v1 t -- ^ __input__
-                      -> Tensor Value t -- ^ __output__
-batchMatrixInverse input | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixInverse"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { default_value { b: false } name: "adjoint" type: "bool" }
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Return the same ref tensor as the input ref tensor.
-
-refIdentity :: forall t . (TensorType t) => Tensor Ref t -- ^ __input__
-               -> Build (Tensor Ref t) -- ^ __output__
-refIdentity input | eqLengthGuard [] =
-    buildOp (opDef "RefIdentity"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-input_arg { is_ref: true name: "input" type_attr: "T" }
-output_arg { is_ref: true name: "output" type_attr: "T" }
--}
-
--- | Computes the singular value decompositions of one or more matrices.
---
--- Computes the SVD of each inner matrix in `input` such that
--- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
--- 
--- ```prettyprint
--- # a is a tensor containing a batch of matrices.
--- # s is a tensor of singular values for each matrix.
--- # u is the tensor containing the left singular vectors for each matrix.
--- # v is the tensor containing the right singular vectors for each matrix.
--- s, u, v = svd(a)
--- s, _, _ = svd(a, compute_uv=False)
--- ```
-svd :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                            (Data.Complex.Complex Float),
-                                            Double, Float] t) =>
-       Tensor v1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
-                   -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
-       -> (Tensor Value t, Tensor Value t, Tensor Value t)
-       -- ^ (__s__, __u__, __v__)
-       --
-       -- * __s__: Singular values. Shape is `[..., P]`.
-       --
-       -- * __u__: Left singular vectors. If `full_matrices` is `False` then shape is
-       -- `[..., M, P]`; if `full_matrices` is `True` then shape is
-       -- `[..., M, M]`. Undefined if `compute_uv` is `False`.
-       --
-       -- * __v__: Right singular vectors. If `full_matrices` is `False` then shape is
-       -- `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
-       -- Undefined if `compute_uv` is `False`.
-svd input | eqLengthGuard [] =
-    buildOp (opDef "Svd"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  default_value { b: true }
-  description: "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never referenced."
-  name: "compute_uv"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`."
-  name: "full_matrices"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_DOUBLE
-      type: DT_FLOAT
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "Singular values. Shape is `[..., P]`."
-  name: "s"
-  type_attr: "T"
-}
-output_arg {
-  description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, M]`; if `full_matrices` is `True` then shape is\n`[..., M, P]`. Undefined if `compute_uv` is `False`."
-  name: "u"
-  type_attr: "T"
-}
-output_arg {
-  description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, N]`. If `full_matrices` is `True` then shape is `[..., N, P]`.\nUndefined if `compute_uv` is false."
-  name: "v"
-  type_attr: "T"
-}
--}
-
--- | Solves one or more linear least-squares problems.
---
--- `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
--- form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`.
--- The output is a tensor of shape `[..., N, K]` where each output matrix solves
--- each of the equations matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]
--- in the least squares sense.
--- 
--- Below we use the following notation for each pair of matrix and
--- right-hand sides in the batch:
--- 
--- `matrix`=\\(A \in \Re^{m \times n}\\),
--- `rhs`=\\(B  \in \Re^{m \times k}\\),
--- `output`=\\(X  \in \Re^{n \times k}\\),
--- `l2_regularizer`=\\(\lambda\\).
--- 
--- If `fast` is `True`, then the solution is computed by solving the normal
--- equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
--- \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
--- problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
--- \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
--- \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
--- minimum-norm solution to the under-determined linear system, i.e.
--- \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
--- \\(A Z = B\\). Notice that the fast path is only numerically stable when
--- \\(A\\) is numerically full rank and has a condition number
--- \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or \\(\lambda\\) is
--- sufficiently large.
--- 
--- If `fast` is `False` an algorithm based on the numerically robust complete
--- orthogonal decomposition is used. This computes the minimum-norm
--- least-squares solution, even when \\(A\\) is rank deficient. This path is
--- typically 6-7 times slower than the fast path. If `fast` is `False` then
--- `l2_regularizer` is ignored.
-matrixSolveLs :: forall v1 v2 v3 t . (TensorType t, OneOf '[Double, Float] t) =>
-                 Tensor v1 t -- ^ __matrix__: Shape is `[..., M, N]`.
-                 -> Tensor v2 t -- ^ __rhs__: Shape is `[..., M, K]`.
-                 -> Tensor v3 Double -- ^ __l2_regularizer__: Scalar tensor.
-                                     -- 
-                                     -- @compatibility(numpy)
-                                     -- Equivalent to np.linalg.lstsq
-                                     -- @end_compatibility
-                 -> Tensor Value t -- ^ __output__: Shape is `[..., N, K]`.
-matrixSolveLs matrix rhs l2_regularizer | eqLengthGuard [] =
-    buildOp (opDef "MatrixSolveLs"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        matrix rhs l2_regularizer
-{-
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-attr { default_value { b: true } name: "fast" type: "bool" }
-input_arg {
-  description: "Shape is `[..., M, N]`."
-  name: "matrix"
-  type_attr: "T"
-}
-input_arg {
-  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
-}
-input_arg {
-  description: "Scalar tensor.\n\n@compatibility(numpy)\nEquivalent to np.linalg.lstsq\n@end_compatibility"
-  name: "l2_regularizer"
-  type: DT_DOUBLE
-}
-output_arg {
-  description: "Shape is `[..., N, K]`."
-  name: "output"
-  type_attr: "T"
-}
--}
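-
--- A hypothetical call (assuming `a :: Tensor v1 Float` with shape
--- `[..., M, N]`, `b :: Tensor v2 Float` with shape `[..., M, K]`, and a
--- scalar `lambda :: Tensor v3 Double` are in scope):
---
--- ```haskell
--- x = matrixSolveLs a b lambda
--- ```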
-
--- | Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
---
--- Packs the `N` tensors in `values` into a tensor with rank one higher than each
--- tensor in `values`, by packing them along the `axis` dimension.
--- Given a list of tensors of shape `(A, B, C)`;
--- 
--- if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
--- if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
--- Etc.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 'x' is [1, 4]
--- # 'y' is [2, 5]
--- # 'z' is [3, 6]
--- pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
--- pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
--- ```
--- 
--- This is the opposite of `unpack`.
-pack :: forall v1 t . (TensorType t) =>
-        [Tensor v1 t] -- ^ __values__: Must be of same shape and type.
-        -> Tensor Value t -- ^ __output__: The packed tensor.
-pack values | eqLengthGuard [("N", [("values", length values)])] =
-    buildOp (opDef "Pack"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        values
-  where
-    n = fromIntegral (length values) :: Int64
-{-
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-attr { name: "T" type: "type" }
-attr {
-  default_value { i: 0 }
-  description: "Dimension along which to pack.  Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`."
-  name: "axis"
-  type: "int"
-}
-input_arg {
-  description: "Must be of same shape and type."
-  name: "values"
-  number_attr: "N"
-  type_attr: "T"
-}
-output_arg {
-  description: "The packed tensor." name: "output" type_attr: "T"
-}
--}
-
--- | Closes the given barrier.
---
--- This operation signals that no more new elements will be inserted in the
--- given barrier. Subsequent InsertMany operations that try to introduce a new key will fail.
--- Subsequent InsertMany operations that just add missing components to already
--- existing elements will continue to succeed. Subsequent TakeMany operations will
--- continue to succeed if sufficient completed elements remain in the barrier.
--- Subsequent TakeMany operations that would block will fail immediately.
-barrierClose :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
-                -> Build (ControlNode)
-barrierClose handle | eqLengthGuard [] =
-    buildOp (opDef "BarrierClose")
-        handle
-{-
-attr {
-  default_value { b: false }
-  description: "If true, all pending enqueue requests that are\nblocked on the barrier\'s queue will be cancelled. InsertMany will fail, even\nif no new key is introduced."
-  name: "cancel_pending_enqueues"
-  type: "bool"
-}
-input_arg {
-  description: "The handle to a barrier."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Computes the eigen decomposition of one or more square self-adjoint matrices.
---
--- Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
--- `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
--- 
--- ```prettyprint
--- # a is a tensor.
--- # e is a tensor of eigenvalues.
--- # v is a tensor of eigenvectors.
--- e, v = self_adjoint_eig(a)
--- e = self_adjoint_eig(a, compute_v=False)
--- ```
-selfAdjointEigV2 :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-                    Tensor v1 t -- ^ __input__: `Tensor` input of shape `[N, N]`.
-                    -> (Tensor Value t, Tensor Value t) -- ^ (__e__, __v__)
-                    --
-                    -- * __e__: Eigenvalues. Shape is `[N]`.
-                    --
-                    -- * __v__: Eigenvectors. Shape is `[N, N]`.
-selfAdjointEigV2 input | eqLengthGuard [] =
-    buildOp (opDef "SelfAdjointEigV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  default_value { b: true }
-  description: "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed."
-  name: "compute_v"
-  type: "bool"
-}
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "`Tensor` input of shape `[N, N]`."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "Eigenvalues. Shape is `[N]`."
-  name: "e"
-  type_attr: "T"
-}
-output_arg {
-  description: "Eigenvectors. Shape is `[N, N]`."
-  name: "v"
-  type_attr: "T"
-}
--}
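-
--- A concrete instance: the diagonal matrix `[[2, 0], [0, 3]]` has
--- eigenvalues `e = [2, 3]` with the identity columns as eigenvectors.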
-
--- | Subtracts sparse updates to a variable reference.
---
---     # Scalar indices
---     ref[indices, ...] -= updates[...]
--- 
---     # Vector indices (for each i)
---     ref[indices[i], ...] -= updates[i, ...]
--- 
---     # High rank indices (for each i, ..., j)
---     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
--- 
--- This operation outputs `ref` after the update is done.
--- This makes it easier to chain operations that need to use the updated value.
--- 
--- Duplicate entries are handled correctly: if multiple `indices` reference
--- the same location, their (negated) contributions add.
--- 
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/ScatterSub.png" alt>
--- </div>
-scatterSub :: forall v2 v3 t tindices . (TensorType t,
-                                         OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t, TensorType tindices,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tindices) =>
-              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
-              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
-              -> Tensor v3 t -- ^ __updates__: A tensor of updated values to subtract from `ref`.
-              -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
-              -- to use the updated values after the update is done.
-scatterSub ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterSub"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A tensor of indices into the first dimension of `ref`."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A tensor of updated values to subtract from `ref`."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
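--- A hedged sketch of 'scatterSub' (not generated code; 'initializedVariable',
--- 'build', 'runSession', 'run', 'vector', and 'constant' are assumed from
--- this package's TensorFlow.Core and TensorFlow.Ops modules):
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > scatterSubExample :: IO ()
--- > scatterSubExample = do
--- >     r <- TF.runSession $ do
--- >         -- ref = [10, 10, 10]; subtract 1 at index 0 and 4 at index 2.
--- >         v   <- TF.build $ TF.initializedVariable
--- >                    (TF.constant (TF.Shape [3]) [10, 10, 10 :: Float])
--- >         out <- TF.build $ Ops.scatterSub v (TF.vector [0, 2 :: Int32])
--- >                                            (TF.vector [1, 4 :: Float])
--- >         TF.run out
--- >     print (r :: V.Vector Float)  -- expected fromList [9.0,10.0,6.0]
-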
--- | Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
---
--- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
--- form square matrices, with the same constraints as the single matrix
--- SelfAdjointEig.
--- 
--- The result is a [..., M+1, M] matrix with [..., 0,:] containing the
--- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
-selfAdjointEig :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-                  Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
-                  -> Tensor Value t -- ^ __output__: Shape is `[..., M+1, M]`.
-selfAdjointEig input | eqLengthGuard [] =
-    buildOp (opDef "SelfAdjointEig"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "Shape is `[..., M+1, M]`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Stops gradient computation.
---
--- When executed in a graph, this op outputs its input tensor as-is.
--- 
--- When building ops to compute gradients, this op prevents the contribution of
--- its inputs from being taken into account.  Normally, the gradient generator adds ops
--- to a graph to compute the derivatives of a specified 'loss' by recursively
--- finding out inputs that contributed to its computation.  If you insert this op
--- in the graph, its inputs are masked from the gradient generator.  They are not
--- taken into account for computing gradients.
--- 
--- This is useful any time you want to compute a value with TensorFlow but need
--- to pretend that the value was a constant. Some examples include:
--- 
--- *  The *EM* algorithm where the *M-step* should not involve backpropagation
---    through the output of the *E-step*.
--- *  Contrastive divergence training of Boltzmann machines where, when
---    differentiating the energy function, the training must not backpropagate
---    through the graph that generated the samples from the model.
--- *  Adversarial training, where no backprop should happen through the adversarial
---    example generation process.
-stopGradient :: forall v1 t . (TensorType t) => Tensor v1 t -- ^ __input__
-                -> Tensor Value t -- ^ __output__
-stopGradient input | eqLengthGuard [] =
-    buildOp (opDef "StopGradient"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
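--- A minimal forward-pass sketch of 'stopGradient' (illustrative only;
--- 'runSession', 'run', and 'vector' are assumed from TensorFlow.Core and
--- TensorFlow.Ops). In the forward pass the op is the identity; its effect
--- only shows up when gradients are built through the graph:
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > stopGradientExample :: IO ()
--- > stopGradientExample = do
--- >     r <- TF.runSession $ TF.run $
--- >         Ops.stopGradient (TF.vector [1, 2, 3 :: Float])
--- >     print (r :: V.Vector Float)  -- identity forward: fromList [1.0,2.0,3.0]
-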
--- | Returns the index with the largest value across dimensions of a tensor.
-
-argMax :: forall v1 v2 t tidx . (TensorType t,
-                                 OneOf '[(Data.Complex.Complex Double),
-                                         (Data.Complex.Complex Float),
-                                         Data.Int.Int16, Data.Int.Int32,
-                                         Data.Int.Int64, Data.Int.Int8,
-                                         Data.Word.Word16, Data.Word.Word8,
-                                         Double, Float] t, TensorType tidx,
-                                 OneOf '[Data.Int.Int32,
-                                         Data.Int.Int64] tidx) =>
-          Tensor v1 t -- ^ __input__
-          -> Tensor v2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input).  Describes which dimension
-                            -- of the input Tensor to reduce across. For vectors, use dimension = 0.
-          -> Tensor Value Data.Int.Int64 -- ^ __output__
-argMax input dimension | eqLengthGuard [] =
-    buildOp (opDef "ArgMax"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input dimension
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg {
-  description: "int32, 0 <= dimension < rank(input).  Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0."
-  name: "dimension"
-  type_attr: "Tidx"
-}
-output_arg { name: "output" type: DT_INT64 }
--}
-
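--- A small usage sketch for 'argMax' (illustrative; 'runSession', 'run',
--- 'vector', and 'scalar' are assumed from TensorFlow.Core / TensorFlow.Ops):
---
--- > import Data.Int (Int32, Int64)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > argMaxExample :: IO ()
--- > argMaxExample = do
--- >     ix <- TF.runSession $ TF.run $
--- >         Ops.argMax (TF.vector [1, 9, 3, 7 :: Float])
--- >                    (TF.scalar (0 :: Int32))  -- reduce across dimension 0
--- >     print (ix :: V.Vector Int64)  -- expected fromList [1]
-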
--- | Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
---
--- For an explanation see "Differentiation of the Cholesky algorithm" by
--- Iain Murray http://arxiv.org/abs/1602.07527.
-choleskyGrad :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
-                Tensor v1 t -- ^ __l__: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
-                            -- Algorithm depends only on lower triangular part of the innermost matrices of
-                            -- this tensor.
-                -> Tensor v2 t -- ^ __grad__: df/dl where f is some scalar function. Shape is `[..., M, M]`.
-                               -- Algorithm depends only on lower triangular part of the innermost matrices of
-                               -- this tensor.
-                -> Tensor Value t -- ^ __output__: Symmetrized version of df/dA. Shape is `[..., M, M]`.
-choleskyGrad l grad | eqLengthGuard [] =
-    buildOp (opDef "CholeskyGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        l grad
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
-  name: "l"
-  type_attr: "T"
-}
-input_arg {
-  description: "df/dl where f is some scalar function. Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
-  name: "grad"
-  type_attr: "T"
-}
-output_arg {
-  description: "Symmetrized version of df/dA . Shape is `[..., M, M]`"
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Reshapes a SparseTensor to represent values in a new dense shape.
---
--- This operation has the same semantics as reshape on the represented dense
--- tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
--- 
--- If one component of `new_shape` is the special value -1, the size of that
--- dimension is computed so that the total dense size remains constant.  At
--- most one component of `new_shape` can be -1.  The number of dense elements
--- implied by `new_shape` must be the same as the number of dense elements
--- originally implied by `input_shape`.
--- 
--- Reshaping does not affect the order of values in the SparseTensor.
--- 
--- If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
--- has length `R_out`, then `input_indices` has shape `[N, R_in]`,
--- `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
--- `output_shape` has length `R_out`.
-sparseReshape :: Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
-                                          -- SparseTensor.
-                 -> Tensor v2 Data.Int.Int64 -- ^ __input_shape__: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
-                 -> Tensor v3 Data.Int.Int64 -- ^ __new_shape__: 1-D.  `R_out` vector with the requested new dense shape.
-                 -> (Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int64)
-                 -- ^ (__output_indices__, __output_shape__)
-                 --
-                 -- * __output_indices__: 2-D.  `N x R_out` matrix with the updated indices of non-empty
-                 -- values in the output SparseTensor.
-                 --
-                 -- * __output_shape__: 1-D.  `R_out` vector with the full dense shape of the output
-                 -- SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
-                 -- filled in.
-sparseReshape input_indices input_shape new_shape | eqLengthGuard [] =
-    buildOp (opDef "SparseReshape")
-        input_indices input_shape new_shape
-{-
-input_arg {
-  description: "2-D.  `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor."
-  name: "input_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `R_in` vector with the input SparseTensor\'s dense shape."
-  name: "input_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `R_out` vector with the requested new dense shape."
-  name: "new_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "2-D.  `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor."
-  name: "output_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  `R_out` vector with the full dense shape of the output\nSparseTensor.  This is the same as `new_shape` but with any -1 dimensions\nfilled in."
-  name: "output_shape"
-  type: DT_INT64
-}
--}
-
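--- A worked sketch of 'sparseReshape' (illustrative; constants and session
--- helpers are assumed from TensorFlow.Core / TensorFlow.Ops). Reshaping a
--- `[2, 2]` SparseTensor with entries at (0,0) and (1,1) into shape `[4]`
--- moves those entries to linear positions 0 and 3:
---
--- > import Data.Int (Int64)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > sparseReshapeExample :: IO ()
--- > sparseReshapeExample = do
--- >     (ix, shp) <- TF.runSession $ TF.run $
--- >         Ops.sparseReshape
--- >             (TF.constant (TF.Shape [2, 2]) [0, 0, 1, 1 :: Int64])  -- N=2, R_in=2
--- >             (TF.vector [2, 2 :: Int64])  -- input dense shape
--- >             (TF.vector [4 :: Int64])     -- requested new shape
--- >     print (ix  :: V.Vector Int64)  -- expected fromList [0,3]
--- >     print (shp :: V.Vector Int64)  -- expected fromList [4]
-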
--- | Update '*var' according to the adadelta scheme.
-
-sparseApplyAdadelta :: forall v4 v5 v6 v7 v8 t tindices . (TensorType t,
-                                                           OneOf '[(Data.Complex.Complex Double),
-                                                                   (Data.Complex.Complex Float),
-                                                                   Data.Int.Int16,
-                                                                   Data.Int.Int32,
-                                                                   Data.Int.Int64,
-                                                                   Data.Int.Int8,
-                                                                   Data.Word.Word16,
-                                                                   Data.Word.Word8,
-                                                                   Double,
-                                                                   Float] t,
-                                                           TensorType tindices,
-                                                           OneOf '[Data.Int.Int32,
-                                                                   Data.Int.Int64] tindices) =>
-                       Tensor Ref t -- ^ __var__
-                       -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                       -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable().
-                       -> Tensor v4 t -- ^ __lr__: Learning rate. Must be a scalar.
-                       -> Tensor v5 t -- ^ __rho__: Decay factor. Must be a scalar.
-                       -> Tensor v6 t -- ^ __epsilon__: Constant factor. Must be a scalar.
-                       -> Tensor v7 t -- ^ __grad__: The gradient.
-                       -> Tensor v8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-                       -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyAdadelta var accum accum_update lr rho epsilon grad
-                    indices | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyAdadelta"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var accum accum_update lr rho epsilon grad indices
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg { is_ref: true name: "var" type_attr: "T" }
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: ": Should be from a Variable()."
-  is_ref: true
-  name: "accum_update"
-  type_attr: "T"
-}
-input_arg {
-  description: "Learning rate. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "Decay factor. Must be a scalar."
-  name: "rho"
-  type_attr: "T"
-}
-input_arg {
-  description: "Constant factor. Must be a scalar."
-  name: "epsilon"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var and accum."
-  name: "indices"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Computes the gradient of morphological 2-D dilation with respect to the filter.
-
-dilation2DBackpropFilter :: forall v1 v2 v3 t . (TensorType t,
-                                                 OneOf '[Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Int.Int64,
-                                                         Data.Int.Int8,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8,
-                                                         Double, Float] t) =>
-                            Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`.
-                            -> Tensor v2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`.
-                            -> Tensor v3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`.
-                            -> Tensor Value t -- ^ __filter_backprop__: 3-D with shape `[filter_height, filter_width, depth]`.
-dilation2DBackpropFilter input filter out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Dilation2DBackpropFilter"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter out_backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
-  has_minimum: true
-  minimum: 4
-  name: "rates"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, in_height, in_width, depth]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "3-D with shape `[filter_height, filter_width, depth]`."
-  name: "filter"
-  type_attr: "T"
-}
-input_arg {
-  description: "4-D with shape `[batch, out_height, out_width, depth]`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "3-D with shape `[filter_height, filter_width, depth]`."
-  name: "filter_backprop"
-  type_attr: "T"
-}
--}
-
--- | 
-
-batchSelfAdjointEigV2 :: forall v1 t . (TensorType t, OneOf '[Double,
-                                                              Float] t) =>
-                         Tensor v1 t -- ^ __input__
-                         -> (Tensor Value t, Tensor Value t) -- ^ (__e__, __v__)
-                         --
-                         -- * __e__
-                         --
-                         -- * __v__
-batchSelfAdjointEigV2 input | eqLengthGuard [] =
-    buildOp (opDef "BatchSelfAdjointEigV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { default_value { b: true } name: "compute_v" type: "bool" }
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "e" type_attr: "T" }
-output_arg { name: "v" type_attr: "T" }
--}
-
--- | Computes the number of incomplete elements in the given barrier.
-
-barrierIncompleteSize :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
-                         -> Build (Tensor Value Data.Int.Int32) -- ^ __size__: The number of incomplete elements (i.e. those with some of their value
-                         -- components not set) in the barrier.
-barrierIncompleteSize handle | eqLengthGuard [] =
-    buildOp (opDef "BarrierIncompleteSize")
-        handle
-{-
-input_arg {
-  description: "The handle to a barrier."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier."
-  name: "size"
-  type: DT_INT32
-}
--}
-
--- | Fake-quantize the 'inputs' tensor of type float and shape `[b, h, w, d]` via
---
--- global float scalars `min` and `max` to 'outputs' tensor of same shape as
--- `inputs`.
--- 
--- [min; max] is the clamping range for the 'inputs' data.  Op divides this range
--- into 255 steps (total of 256 values), then replaces each 'inputs' value with the
--- closest of the quantized step values.
--- 
--- This operation has a gradient and thus allows for training `min` and `max` values.
-fakeQuantWithMinMaxVars :: Tensor v1 Float -- ^ __inputs__
-                           -> Tensor v2 Float -- ^ __min__
-                           -> Tensor v3 Float -- ^ __max__
-                           -> Tensor Value Float -- ^ __outputs__
-fakeQuantWithMinMaxVars inputs min max | eqLengthGuard [] =
-    buildOp (opDef "FakeQuantWithMinMaxVars")
-        inputs min max
-{-
-input_arg { name: "inputs" type: DT_FLOAT }
-input_arg { name: "min" type: DT_FLOAT }
-input_arg { name: "max" type: DT_FLOAT }
-output_arg { name: "outputs" type: DT_FLOAT }
--}
-
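--- A short sketch of 'fakeQuantWithMinMaxVars' (illustrative; 'runSession',
--- 'run', 'vector', and 'scalar' are assumed from TensorFlow.Core /
--- TensorFlow.Ops):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > fakeQuantExample :: IO ()
--- > fakeQuantExample = do
--- >     out <- TF.runSession $ TF.run $
--- >         Ops.fakeQuantWithMinMaxVars
--- >             (TF.vector [-0.5, 0.0, 0.3, 1.5 :: Float])  -- inputs
--- >             (TF.scalar (0 :: Float))                    -- min
--- >             (TF.scalar (1 :: Float))                    -- max
--- >     -- Inputs are clamped to [0, 1] and snapped to one of 256 levels.
--- >     print (out :: V.Vector Float)
-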
--- | Reads the value of a variable.
---
--- The tensor returned by this operation is immutable.
--- 
--- The value returned by this operation is guaranteed to be influenced by all the
--- writes on which this operation depends directly or indirectly, and to not be
--- influenced by any of the writes which depend directly or indirectly on this
--- operation.
-readVariableOp :: forall dtype . (TensorType dtype) =>
-                  ResourceHandle dtype -- ^ __resource__: handle to the resource in which to store the variable.
-                  -> Build (Tensor Value dtype) -- ^ __value__
-readVariableOp resource | eqLengthGuard [] =
-    buildOp (opDef "ReadVariableOp"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        resource
-{-
-attr {
-  description: "the dtype of the value." name: "dtype" type: "type"
-}
-input_arg {
-  description: "handle to the resource in which to store the variable."
-  name: "resource"
-  type: DT_RESOURCE
-}
-output_arg { name: "value" type_attr: "dtype" }
--}
-
--- | Gradient for batch normalization.
---
--- Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
--- The size of 1D Tensors matches the dimension C of the 4D Tensors.
-fusedBatchNormGrad :: forall v1 v2 v3 v4 v5 t . (TensorType t,
-                                                 OneOf '[(Data.Complex.Complex Double),
-                                                         (Data.Complex.Complex Float),
-                                                         Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Int.Int64,
-                                                         Data.Int.Int8,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8,
-                                                         Double, Float] t) =>
-                      Tensor v1 t -- ^ __y_backprop__: A 4D Tensor for the gradient with respect to y.
-                      -> Tensor v2 t -- ^ __x__: A 4D Tensor for input data.
-                      -> Tensor v3 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x.
-                      -> Tensor v4 t -- ^ __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused
-                                     -- in the gradient computation.
-                      -> Tensor v5 t -- ^ __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance
-                                     -- in the cuDNN case), to be used in the gradient computation.
-                      -> (Tensor Value t, Tensor Value t, Tensor Value t,
-                          Tensor Value t, Tensor Value t)
-                      -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__)
-                      --
-                      -- * __x_backprop__: A 4D Tensor for the gradient with respect to x.
-                      --
-                      -- * __scale_backprop__: A 1D Tensor for the gradient with respect to scale.
-                      --
-                      -- * __offset_backprop__: A 1D Tensor for the gradient with respect to offset.
-                      --
-                      -- * __reserve_space_3__: Unused placeholder to match the mean input in FusedBatchNorm.
-                      --
-                      -- * __reserve_space_4__: Unused placeholder to match the variance input
-                      -- in FusedBatchNorm.
-fusedBatchNormGrad y_backprop x scale reserve_space_1
-                   reserve_space_2 | eqLengthGuard [] =
-    buildOp (opDef "FusedBatchNormGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        y_backprop x scale reserve_space_1 reserve_space_2
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  description: "The data type for the elements of input and output Tensors."
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { f: 1.0e-4 }
-  description: "A small float number added to the variance of x."
-  name: "epsilon"
-  type: "float"
-}
-attr {
-  default_value { s: "NHWC" }
-  description: "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\"."
-  name: "data_format"
-  type: "string"
-}
-attr {
-  default_value { b: true }
-  description: "A bool value to indicate the operation is for training (default)\nor inference."
-  name: "is_training"
-  type: "bool"
-}
-input_arg {
-  description: "A 4D Tensor for the gradient with respect to y."
-  name: "y_backprop"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 4D Tensor for input data." name: "x" type_attr: "T"
-}
-input_arg {
-  description: "A 1D Tensor for scaling factor, to scale the normalized x."
-  name: "scale"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation."
-  name: "reserve_space_1"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation."
-  name: "reserve_space_2"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 4D Tensor for the gradient with respect to x."
-  name: "x_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 1D Tensor for the gradient with respect to scale."
-  name: "scale_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "A 1D Tensor for the gradient with respect to offset."
-  name: "offset_backprop"
-  type_attr: "T"
-}
-output_arg {
-  description: "Unused placeholder to match the mean input in FusedBatchNorm."
-  name: "reserve_space_3"
-  type_attr: "T"
-}
-output_arg {
-  description: "Unused placeholder to match the variance input\nin FusedBatchNorm."
-  name: "reserve_space_4"
-  type_attr: "T"
-}
--}
-
--- | A queue that produces elements in first-in first-out order.
---
--- Variable-size shapes are allowed by setting the corresponding shape dimensions
--- to 0 in the shape attr.  In this case DequeueMany will pad up to the maximum
--- size of any given element in the minibatch.  See below for details.
-paddingFIFOQueue :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
-paddingFIFOQueue  | eqLengthGuard [] =
-    buildOp (opDef "PaddingFIFOQueue")
-        
-{-
-attr {
-  description: "The type of each component in a value."
-  has_minimum: true
-  minimum: 1
-  name: "component_types"
-  type: "list(type)"
-}
-attr {
-  default_value { list { } }
-  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1.  In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time."
-  has_minimum: true
-  name: "shapes"
-  type: "list(shape)"
-}
-attr {
-  default_value { i: -1 }
-  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
-  name: "capacity"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to the queue."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Computes the inverse of one or more square invertible matrices or their
---
--- adjoints (conjugate transposes).
--- 
--- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
--- form square matrices. The output is a tensor of the same shape as the input
--- containing the inverse for all input submatrices `[..., :, :]`.
--- 
--- The op uses LU decomposition with partial pivoting to compute the inverses.
--- 
--- If a matrix is not invertible there is no guarantee what the op does. It
--- may detect the condition and raise an exception or it may simply return a
--- garbage result.
-matrixInverse :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-                 Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
-                 -> Tensor Value t -- ^ __output__: Shape is `[..., M, M]`.
-                 -- 
-                 -- @compatibility(numpy)
-                 -- Equivalent to np.linalg.inv
-                 -- @end_compatibility
-matrixInverse input | eqLengthGuard [] =
-    buildOp (opDef "MatrixInverse"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { default_value { b: false } name: "adjoint" type: "bool" }
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "Shape is `[..., M, M]`.\n\n@compatibility(numpy)\nEquivalent to np.linalg.inv\n@end_compatibility"
-  name: "output"
-  type_attr: "T"
-}
--}
-
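--- A quick sketch of 'matrixInverse' (illustrative; 'runSession', 'run',
--- 'constant', and 'Shape' are assumed from TensorFlow.Core / TensorFlow.Ops):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > matrixInverseExample :: IO ()
--- > matrixInverseExample = do
--- >     inv <- TF.runSession $ TF.run $
--- >         Ops.matrixInverse (TF.constant (TF.Shape [2, 2]) [4, 7, 2, 6 :: Float])
--- >     -- det = 10, so the inverse is [[0.6, -0.7], [-0.2, 0.4]].
--- >     print (inv :: V.Vector Float)
-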
--- | Outputs a `Summary` protocol buffer with audio.
---
--- The summary has up to `max_outputs` summary values containing audio. The
--- audio is built from `tensor` which must be 3-D with shape `[batch_size,
--- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
--- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
--- 
--- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
--- build the `tag` of the summary values:
--- 
--- *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
--- *  If `max_outputs` is greater than 1, the summary value tags are
---    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
-audioSummaryV2 :: Tensor v1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
-                  -> Tensor v2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`.
-                  -> Tensor v3 Float -- ^ __sample_rate__: The sample rate of the signal in hertz.
-                  -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
-audioSummaryV2 tag tensor sample_rate | eqLengthGuard [] =
-    buildOp (opDef "AudioSummaryV2")
-        tag tensor sample_rate
-{-
-attr {
-  default_value { i: 3 }
-  description: "Max number of batch elements to generate audio for."
-  has_minimum: true
-  minimum: 1
-  name: "max_outputs"
-  type: "int"
-}
-input_arg {
-  description: "Scalar. Used to build the `tag` attribute of the summary values."
-  name: "tag"
-  type: DT_STRING
-}
-input_arg {
-  description: "2-D of shape `[batch_size, frames]`."
-  name: "tensor"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The sample rate of the signal in hertz."
-  name: "sample_rate"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Scalar. Serialized `Summary` protocol buffer."
-  name: "summary"
-  type: DT_STRING
-}
--}
-
--- | Computes the determinant of one or more square matrices.
---
--- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
--- form square matrices. The output is a tensor containing the determinants
--- for all input submatrices `[..., :, :]`.
-matrixDeterminant :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-                     Tensor v1 t -- ^ __input__: Shape is `[..., M, M]`.
-                     -> Tensor Value t -- ^ __output__: Shape is `[...]`.
-matrixDeterminant input | eqLengthGuard [] =
-    buildOp (opDef "MatrixDeterminant"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape is `[..., M, M]`." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "Shape is `[...]`." name: "output" type_attr: "T"
-}
--}
-
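--- A one-line sketch of 'matrixDeterminant' (illustrative; session and
--- constant helpers assumed from TensorFlow.Core / TensorFlow.Ops):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > detExample :: IO ()
--- > detExample = do
--- >     d <- TF.runSession $ TF.run $
--- >         Ops.matrixDeterminant (TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float])
--- >     print (d :: V.Vector Float)  -- expected fromList [-2.0]
-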
--- | Writes contents to the file at input filename. Creates the file if it does not already exist.
-
-writeFile :: Tensor v1 Data.ByteString.ByteString -- ^ __filename__: scalar. The name of the file to which we write the contents.
-             -> Tensor v2 Data.ByteString.ByteString -- ^ __contents__: scalar. The content to be written to the output file.
-             -> ControlNode
-writeFile filename contents | eqLengthGuard [] =
-    buildOp (opDef "WriteFile")
-        filename contents
-{-
-input_arg {
-  description: "scalar. The name of the file to which we write the contents."
-  name: "filename"
-  type: DT_STRING
-}
-input_arg {
-  description: "scalar. The content to be written to the output file."
-  name: "contents"
-  type: DT_STRING
-}
--}
-
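--- A hedged sketch of 'writeFile' (illustrative; 'runSession', 'run_', and
--- 'scalar' are assumed from TensorFlow.Core / TensorFlow.Ops, and the output
--- path is hypothetical):
---
--- > {-# LANGUAGE OverloadedStrings #-}
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > writeFileExample :: IO ()
--- > writeFileExample = TF.runSession $
--- >     -- Both inputs are scalar strings; the file is (over)written on run.
--- >     TF.run_ $ Ops.writeFile (TF.scalar "/tmp/example.txt")
--- >                             (TF.scalar "hello\n")
-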
--- | Concatenates quantized tensors along one dimension.
-
-quantizedConcat :: forall v1 v2 v3 v4 t . (TensorType t) =>
-                   Tensor v1 Data.Int.Int32 -- ^ __concat_dim__: 0-D.  The dimension along which to concatenate.  Must be in the
-                                            -- range [0, rank(values)).
-                   -> [Tensor v2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match,
-                                    -- and their sizes must match in all dimensions except `concat_dim`.
-                   -> [Tensor v3 Float] -- ^ __input_mins__: The minimum scalar values for each of the input tensors.
-                   -> [Tensor v4 Float] -- ^ __input_maxes__: The maximum scalar values for each of the input tensors.
-                   -> (Tensor Value t, Tensor Value Float, Tensor Value Float)
-                   -- ^ (__output__, __output_min__, __output_max__)
-                   --
-                   -- * __output__: A `Tensor` with the concatenation of values stacked along the
-                   -- `concat_dim` dimension.  This tensor's shape matches that of `values` except
-                   -- in `concat_dim` where it has the sum of the sizes.
-                   --
-                   -- * __output_min__: The float value that the minimum quantized output value represents.
-                   --
-                   -- * __output_max__: The float value that the maximum quantized output value represents.
-quantizedConcat concat_dim values input_mins
-                input_maxes | eqLengthGuard [("N", [("values", length values),
-                                                    ("input_mins", length input_mins),
-                                                    ("input_maxes", length input_maxes)])] =
-    buildOp (opDef "QuantizedConcat"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        concat_dim values input_mins input_maxes
-  where
-    n = fromIntegral (length values) :: Int64
-{-
-attr { has_minimum: true minimum: 2 name: "N" type: "int" }
-attr { name: "T" type: "type" }
-input_arg {
-  description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
-  name: "concat_dim"
-  type: DT_INT32
-}
-input_arg {
-  description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
-  name: "values"
-  number_attr: "N"
-  type_attr: "T"
-}
-input_arg {
-  description: "The minimum scalar values for each of the input tensors."
-  name: "input_mins"
-  number_attr: "N"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The maximum scalar values for each of the input tensors."
-  name: "input_maxes"
-  number_attr: "N"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
-  name: "output"
-  type_attr: "T"
-}
-output_arg {
-  description: "The float value that the minimum quantized output value represents."
-  name: "output_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the maximum quantized output value represents."
-  name: "output_max"
-  type: DT_FLOAT
-}
--}
-
--- | Creates a handle to a Variable resource.
-
-varHandleOp :: forall dtype . (TensorType dtype) =>
-               Shape -- ^ __shape__: The (possibly partially specified) shape of this variable.
-               -> Build (ResourceHandle dtype) -- ^ __resource__
-varHandleOp shape | eqLengthGuard [] =
-    buildOp (opDef "VarHandleOp"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "shape" .~ shape)
-        
-{-
-attr {
-  default_value { s: "" }
-  description: "the container this variable is placed in."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "the name by which this variable is referred to."
-  name: "shared_name"
-  type: "string"
-}
-attr {
-  description: "the type of this variable. Must agree with the dtypes\nof all ops using this variable."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  description: "The (possibly partially specified) shape of this variable."
-  name: "shape"
-  type: "shape"
-}
-output_arg { name: "resource" type: DT_RESOURCE }
--}
-
--- | Assign `value` to the sliced l-value reference of `ref`.
---
--- The values of `value` are assigned to the positions in the variable
--- `ref` that are selected by the slice parameters. The slice parameters
--- `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
--- 
--- NOTE this op currently does not support broadcasting and so `value`'s
--- shape must be exactly the shape produced by the slice of `ref`.
-stridedSliceAssign :: forall v2 v3 v4 v5 t index . (TensorType t,
-                                                    TensorType index,
-                                                    OneOf '[Data.Int.Int32,
-                                                            Data.Int.Int64] index) =>
-                      Tensor Ref t -- ^ __ref__
-                      -> Tensor v2 index -- ^ __begin__
-                      -> Tensor v3 index -- ^ __end__
-                      -> Tensor v4 index -- ^ __strides__
-                      -> Tensor v5 t -- ^ __value__
-                      -> Build (Tensor Ref t) -- ^ __output_ref__
-stridedSliceAssign ref begin end strides value | eqLengthGuard [] =
-    buildOp (opDef "StridedSliceAssign"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Index" .~ tensorType (undefined :: index))
-        ref begin end strides value
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Index"
-  type: "type"
-}
-attr { default_value { i: 0 } name: "begin_mask" type: "int" }
-attr { default_value { i: 0 } name: "end_mask" type: "int" }
-attr { default_value { i: 0 } name: "ellipsis_mask" type: "int" }
-attr { default_value { i: 0 } name: "new_axis_mask" type: "int" }
-attr {
-  default_value { i: 0 } name: "shrink_axis_mask" type: "int"
-}
-input_arg { is_ref: true name: "ref" type_attr: "T" }
-input_arg { name: "begin" type_attr: "Index" }
-input_arg { name: "end" type_attr: "Index" }
-input_arg { name: "strides" type_attr: "Index" }
-input_arg { name: "value" type_attr: "T" }
-output_arg { is_ref: true name: "output_ref" type_attr: "T" }
--}
-
--- | Checks whether a resource handle-based variable has been initialized.
-
-varIsInitializedOp :: ResourceHandle dtype -- ^ __resource__: the input resource handle.
-                      -> Build (Tensor Value Bool) -- ^ __is_initialized__: a scalar boolean which is true if the variable has been
-                      -- initialized.
-varIsInitializedOp resource | eqLengthGuard [] =
-    buildOp (opDef "VarIsInitializedOp")
-        resource
-{-
-input_arg {
-  description: "the input resource handle."
-  name: "resource"
-  type: DT_RESOURCE
-}
-output_arg {
-  description: "a scalar boolean which is true if the variable has been\ninitialized."
-  name: "is_initialized"
-  type: DT_BOOL
-}
--}
-
--- | Update '*var' according to the RMSProp algorithm.
---
--- Note that in the dense implementation of this algorithm, ms and mom will
--- update even if the grad is zero, but in this sparse implementation, ms
--- and mom will not update in iterations during which the grad is zero.
--- 
--- mean_square = decay * mean_square + (1-decay) * gradient ** 2
--- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
--- 
--- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
--- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
--- var <- var - mom
-sparseApplyRMSProp :: forall v4 v5 v6 v7 v8 v9 t tindices . (TensorType t,
-                                                             OneOf '[(Data.Complex.Complex Double),
-                                                                     (Data.Complex.Complex Float),
-                                                                     Data.Int.Int16,
-                                                                     Data.Int.Int32,
-                                                                     Data.Int.Int64,
-                                                                     Data.Int.Int8,
-                                                                     Data.Word.Word16,
-                                                                     Data.Word.Word8,
-                                                                     Double,
-                                                                     Float] t,
-                                                             TensorType tindices,
-                                                             OneOf '[Data.Int.Int32,
-                                                                     Data.Int.Int64] tindices) =>
-                      Tensor Ref t -- ^ __var__: Should be from a Variable().
-                      -> Tensor Ref t -- ^ __ms__: Should be from a Variable().
-                      -> Tensor Ref t -- ^ __mom__: Should be from a Variable().
-                      -> Tensor v4 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                      -> Tensor v5 t -- ^ __rho__: Decay rate. Must be a scalar.
-                      -> Tensor v6 t -- ^ __momentum__
-                      -> Tensor v7 t -- ^ __epsilon__: Ridge term. Must be a scalar.
-                      -> Tensor v8 t -- ^ __grad__: The gradient.
-                      -> Tensor v9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom.
-                      -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-sparseApplyRMSProp var ms mom lr rho momentum epsilon grad
-                   indices | eqLengthGuard [] =
-    buildOp (opDef "SparseApplyRMSProp"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        var ms mom lr rho momentum epsilon grad indices
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "ms"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "mom"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "Decay rate. Must be a scalar."
-  name: "rho"
-  type_attr: "T"
-}
-input_arg { name: "momentum" type_attr: "T" }
-input_arg {
-  description: "Ridge term. Must be a scalar."
-  name: "epsilon"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "A vector of indices into the first dimension of var, ms and mom."
-  name: "indices"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | 
-
-batchCholesky :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-                 Tensor v1 t -- ^ __input__
-                 -> Tensor Value t -- ^ __output__
-batchCholesky input | eqLengthGuard [] =
-    buildOp (opDef "BatchCholesky"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | 
-
-tensorArrayGather :: forall v2 v3 dtype . (TensorType dtype) =>
-                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                     -> Tensor v2 Data.Int.Int32 -- ^ __indices__
-                     -> Tensor v3 Float -- ^ __flow_in__
-                     -> Build (Tensor Value dtype) -- ^ __value__
-tensorArrayGather handle indices flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayGather"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle indices flow_in
-{-
-attr { name: "dtype" type: "type" }
-attr {
-  default_value { shape { unknown_rank: true } }
-  name: "element_shape"
-  type: "shape"
-}
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "indices" type: DT_INT32 }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "value" type_attr: "dtype" }
--}
-
--- | Restore a reader to a previously saved state.
---
--- Not all Readers support being restored, so this can produce an
--- Unimplemented error.
-readerRestoreState :: Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
-                      -> Tensor v2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type
-                                                              -- matching reader_handle.
-                      -> Build (ControlNode)
-readerRestoreState reader_handle state | eqLengthGuard [] =
-    buildOp (opDef "ReaderRestoreState")
-        reader_handle state
-{-
-input_arg {
-  description: "Handle to a Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle."
-  name: "state"
-  type: DT_STRING
-}
--}
-
--- | Computes the gradient for the sqrt of `x` wrt its input.
---
--- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
--- is the corresponding input gradient.
-sqrtGrad :: forall v1 v2 t . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float),
-                                      Data.Word.Word16, Double, Float] t) =>
-            Tensor v1 t -- ^ __x__
-            -> Tensor v2 t -- ^ __y__
-            -> Tensor Value t -- ^ __z__
-sqrtGrad x y | eqLengthGuard [] =
-    buildOp (opDef "SqrtGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
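--- A numeric sketch of 'sqrtGrad' (illustrative; helpers assumed from
--- TensorFlow.Core / TensorFlow.Ops). Per the description above, the first
--- input is `y = sqrt(x)` and the second is the incoming gradient `dy`:
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > sqrtGradExample :: IO ()
--- > sqrtGradExample = do
--- >     z <- TF.runSession $ TF.run $
--- >         Ops.sqrtGrad (TF.vector [1, 2, 4 :: Float])  -- y
--- >                      (TF.vector [1, 1, 1 :: Float])  -- dy
--- >     print (z :: V.Vector Float)  -- dy * 0.5 / y: fromList [0.5,0.25,0.125]
-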
--- | Splits a tensor into `num_split` tensors along one dimension.
-
-split :: forall v1 v2 t . (TensorType t) =>
-         Data.Int.Int64 -- ^ __num_split__: The number of ways to split.  Must evenly divide
-                        -- `value.shape[split_dim]`.
-         -> Tensor v1 Data.Int.Int32 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
-                                     -- `[0, rank(value))`.
-         -> Tensor v2 t -- ^ __value__: The tensor to split.
-         -> [Tensor Value t] -- ^ __output__: They are identically shaped tensors, whose shape matches that of `value`
-         -- except along `split_dim`, where their sizes are
-         -- `values.shape[split_dim] / num_split`.
-split num_split split_dim value | eqLengthGuard [] =
-    buildListOp [num_split] (opDef "Split"
-                             & opAttr "T" .~ tensorType (undefined :: t)
-                             & opAttr "num_split" .~ num_split)
-        split_dim value
-{-
-attr {
-  description: "The number of ways to split.  Must evenly divide\n`value.shape[split_dim]`."
-  has_minimum: true
-  minimum: 1
-  name: "num_split"
-  type: "int"
-}
-attr { name: "T" type: "type" }
-input_arg {
-  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(value))`."
-  name: "split_dim"
-  type: DT_INT32
-}
-input_arg {
-  description: "The tensor to split." name: "value" type_attr: "T"
-}
-output_arg {
-  description: "They are identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`values.shape[split_dim] / num_split`."
-  name: "output"
-  number_attr: "num_split"
-  type_attr: "T"
-}
--}
-
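--- A usage sketch of 'split' (illustrative; helpers assumed from
--- TensorFlow.Core / TensorFlow.Ops). Splitting a length-4 vector two ways
--- along dimension 0:
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > splitExample :: IO ()
--- > splitExample = do
--- >     rs <- TF.runSession $ mapM TF.run $
--- >         Ops.split 2 (TF.scalar (0 :: Int32))
--- >                     (TF.vector [1, 2, 3, 4 :: Float])
--- >     print (rs :: [V.Vector Float])  -- [fromList [1.0,2.0], fromList [3.0,4.0]]
-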
--- | A Reader that outputs the lines of a file delimited by '\n'.
-
-textLineReader :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
-textLineReader  | eqLengthGuard [] =
-    buildOp (opDef "TextLineReader")
-        
-{-
-attr {
-  default_value { i: 0 }
-  description: "Number of lines to skip from the beginning of every file."
-  name: "skip_header_lines"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to reference the Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
--}
-
--- | Copy a tensor setting everything outside a central band in each innermost matrix
---
--- to zero.
--- 
--- The `band` part is computed as follows:
--- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
--- tensor with the same shape where
--- 
--- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
--- 
--- The indicator function
--- 
--- `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
---                  (num_upper < 0 || (n-m) <= num_upper)`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # if 'input' is [[ 0,  1,  2, 3]
---                  [-1,  0,  1, 2]
---                  [-2, -1,  0, 1]
---                  [-3, -2, -1, 0]],
--- 
--- tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
---                                        [-1,  0,  1, 2]
---                                        [ 0, -1,  0, 1]
---                                        [ 0,  0, -1, 0]],
--- 
--- tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
---                                       [-1,  0,  1, 0]
---                                       [-2, -1,  0, 1]
---                                       [ 0, -2, -1, 0]]
--- ```
--- 
--- Useful special cases:
--- 
--- ```prettyprint
---  tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
---  tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
---  tf.matrix_band_part(input, 0, 0) ==> Diagonal.
--- ```
-matrixBandPart :: forall v1 v2 v3 t . (TensorType t) =>
-                  Tensor v1 t -- ^ __input__: Rank `k` tensor.
-                  -> Tensor v2 Data.Int.Int64 -- ^ __num_lower__: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
-                                              -- lower triangle.
-                  -> Tensor v3 Data.Int.Int64 -- ^ __num_upper__: 0-D tensor. Number of superdiagonals to keep. If negative, keep
-                                              -- entire upper triangle.
-                  -> Tensor Value t -- ^ __band__: Rank `k` tensor of the same shape as input. The extracted banded tensor.
-matrixBandPart input num_lower num_upper | eqLengthGuard [] =
-    buildOp (opDef "MatrixBandPart"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input num_lower num_upper
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "Rank `k` tensor." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle."
-  name: "num_lower"
-  type: DT_INT64
-}
-input_arg {
-  description: "0-D tensor. Number of superdiagonals to keep. If negative, keep\nentire upper triangle."
-  name: "num_upper"
-  type: DT_INT64
-}
-output_arg {
-  description: "Rank `k` tensor of the same shape as input. The extracted banded tensor."
-  name: "band"
-  type_attr: "T"
-}
--}
-
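--- A sketch of 'matrixBandPart' mirroring the upper-triangular special case
--- above (illustrative; helpers assumed from TensorFlow.Core / TensorFlow.Ops):
---
--- > import Data.Int (Int64)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- > import qualified TensorFlow.GenOps.Core as Ops
--- >
--- > bandPartExample :: IO ()
--- > bandPartExample = do
--- >     band <- TF.runSession $ TF.run $
--- >         Ops.matrixBandPart
--- >             (TF.constant (TF.Shape [3, 3]) [1..9 :: Float])
--- >             (TF.scalar (0  :: Int64))  -- num_lower: drop all subdiagonals
--- >             (TF.scalar (-1 :: Int64))  -- num_upper: keep all superdiagonals
--- >     -- Upper-triangular result: [[1,2,3],[0,5,6],[0,0,9]]
--- >     print (band :: V.Vector Float)
-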
--- | Closes the given queue.
---
--- This operation signals that no more elements will be enqueued in the
--- given queue. Subsequent Enqueue(Many) operations will fail.
--- Subsequent Dequeue(Many) operations will continue to succeed if
--- sufficient elements remain in the queue. Subsequent Dequeue(Many)
--- operations that would block will fail immediately.
-queueClose :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue.
-              -> Build (ControlNode)
-queueClose handle | eqLengthGuard [] =
-    buildOp (opDef "QueueClose")
-        handle
-{-
-attr {
-  default_value { b: false }
-  description: "If true, all pending enqueue requests that are\nblocked on the given queue will be cancelled."
-  name: "cancel_pending_enqueues"
-  type: "bool"
-}
-input_arg {
-  description: "The handle to a queue."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | V2 format specific: merges the metadata files of sharded checkpoints.  The
---
--- result is one logical checkpoint, with one physical metadata file and renamed
--- data files.
--- 
--- Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
--- 
--- If delete_old_dirs is true, attempts to recursively delete the dirname of each
--- path in the input checkpoint_prefixes.  This is useful when those paths are
--- non-user-facing temporary locations.
-mergeV2Checkpoints :: Tensor v1 Data.ByteString.ByteString -- ^ __checkpoint_prefixes__: prefixes of V2 checkpoints to merge.
-                      -> Tensor v2 Data.ByteString.ByteString -- ^ __destination_prefix__: scalar.  The desired final prefix.  Allowed to be the same
-                                                              -- as one of the checkpoint_prefixes.
-                      -> ControlNode
-mergeV2Checkpoints checkpoint_prefixes destination_prefix | eqLengthGuard [] =
-    buildOp (opDef "MergeV2Checkpoints")
-        checkpoint_prefixes destination_prefix
-{-
-attr {
-  default_value { b: true }
-  description: "see above."
-  name: "delete_old_dirs"
-  type: "bool"
-}
-input_arg {
-  description: "prefixes of V2 checkpoints to merge."
-  name: "checkpoint_prefixes"
-  type: DT_STRING
-}
-input_arg {
-  description: "scalar.  The desired final prefix.  Allowed to be the same\nas one of the checkpoint_prefixes."
-  name: "destination_prefix"
-  type: DT_STRING
-}
--}
-
--- | Computes the number of complete elements in the given barrier.
-
-barrierReadySize :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
-                    -> Build (Tensor Value Data.Int.Int32) -- ^ __size__: The number of complete elements (i.e. those with all of their value
-                    -- components set) in the barrier.
-barrierReadySize handle | eqLengthGuard [] =
-    buildOp (opDef "BarrierReadySize")
-        handle
-{-
-input_arg {
-  description: "The handle to a barrier."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier."
-  name: "size"
-  type: DT_INT32
-}
--}
-
--- | A queue that randomizes the order of elements.
-
-randomShuffleQueue :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
-randomShuffleQueue  | eqLengthGuard [] =
-    buildOp (opDef "RandomShuffleQueue")
-        
-{-
-attr {
-  description: "The type of each component in a value."
-  has_minimum: true
-  minimum: 1
-  name: "component_types"
-  type: "list(type)"
-}
-attr {
-  default_value { list { } }
-  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
-  has_minimum: true
-  name: "shapes"
-  type: "list(shape)"
-}
-attr {
-  default_value { i: -1 }
-  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
-  name: "capacity"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements."
-  name: "min_after_dequeue"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, a random seed is used."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to the queue."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Returns the truth value of (x != y) element-wise.
---
--- *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-notEqual :: forall v1 v2 t . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float), Bool,
-                                      Data.ByteString.ByteString,
-                                      Data.Int.Int16, Data.Int.Int32,
-                                      Data.Int.Int64, Data.Int.Int8,
-                                      Data.Word.Word16, Data.Word.Word8, Double,
-                                      Float] t) => Tensor v1 t -- ^ __x__
-            -> Tensor v2 t -- ^ __y__
-            -> Tensor Value Bool -- ^ __z__
-notEqual x y | eqLengthGuard [] =
-    buildOp (opDef "NotEqual"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_QUINT8
-      type: DT_QINT8
-      type: DT_QINT32
-      type: DT_STRING
-      type: DT_BOOL
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type: DT_BOOL }
--}
-
--- | Greedily selects a subset of bounding boxes in descending order of score,
---
--- pruning away boxes that have high intersection-over-union (IOU) overlap
--- with previously selected boxes.  Bounding boxes are supplied as
--- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
--- diagonal pair of box corners and the coordinates can be provided as normalized
--- (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
--- is agnostic to where the origin is in the coordinate system.  Note that this
--- algorithm is invariant to orthogonal transformations and translations
--- of the coordinate system; thus translations or reflections of the coordinate
--- system result in the same boxes being selected by the algorithm.
--- 
--- The output of this operation is a set of integers indexing into the input
--- collection of bounding boxes representing the selected boxes.  The bounding
--- box coordinates corresponding to the selected indices can then be obtained
--- using the `tf.gather` operation.  For example:
--- 
---   selected_indices = tf.image.non_max_suppression(
---       boxes, scores, max_output_size, iou_threshold)
---   selected_boxes = tf.gather(boxes, selected_indices)
-nonMaxSuppression :: Tensor v1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`.
-                     -> Tensor v2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single
-                                        -- score corresponding to each box (each row of boxes).
-                     -> Tensor v3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of
-                                                 -- boxes to be selected by non max suppression.
-                     -> Tensor Value Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected
-                     -- indices from the boxes tensor, where `M <= max_output_size`.
-nonMaxSuppression boxes scores max_output_size | eqLengthGuard [] =
-    buildOp (opDef "NonMaxSuppression")
-        boxes scores max_output_size
-{-
-attr {
-  default_value { f: 0.5 }
-  description: "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU."
-  name: "iou_threshold"
-  type: "float"
-}
-input_arg {
-  description: "A 2-D float tensor of shape `[num_boxes, 4]`."
-  name: "boxes"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)."
-  name: "scores"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression."
-  name: "max_output_size"
-  type: DT_INT32
-}
-output_arg {
-  description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`."
-  name: "selected_indices"
-  type: DT_INT32
-}
--}
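-
--- The Python example above, sketched with these generated wrappers (an
--- illustration; it assumes a generated `gather` wrapper for the `Gather`
--- op, analogous to the other wrappers in this module):
---
--- > selectedBoxesFor boxes scores maxOutputSize =
--- >   let selectedIndices = nonMaxSuppression boxes scores maxOutputSize
--- >   in  gather boxes selectedIndices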
-
--- | 
-
-tensorArrayWrite :: forall v2 v3 v4 t . (TensorType t) =>
-                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                    -> Tensor v2 Data.Int.Int32 -- ^ __index__
-                    -> Tensor v3 t -- ^ __value__
-                    -> Tensor v4 Float -- ^ __flow_in__
-                    -> Build (Tensor Value Float) -- ^ __flow_out__
-tensorArrayWrite handle index value flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayWrite"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle index value flow_in
-{-
-attr { name: "T" type: "type" }
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "index" type: DT_INT32 }
-input_arg { name: "value" type_attr: "T" }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "flow_out" type: DT_FLOAT }
--}
-
--- | Quantizes then dequantizes a tensor.
---
--- This op simulates the precision loss from the quantized forward pass by:
--- 1. Quantizing the tensor to fixed point numbers, which should match the target
---    quantization method when it is used in inference.
--- 2. Dequantizing it back to floating point numbers for the following ops, most
---    likely matmul.
--- 
--- There are different ways to quantize. This version does not use the full range
--- of the output type, choosing to elide the lowest possible value for symmetry
--- (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
--- quantization), so that 0.0 maps to 0.
--- 
--- To perform this op, we first find the range of values in our tensor. The range
--- we use is always centered on 0, so we find m such that
--- 
--- 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
--- 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
--- 
--- Our input tensor range is then [-m, m].
--- 
--- Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
--- If signed_input is true, this is
--- 
---   [min_fixed, max_fixed] =
---       [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
--- 
--- Otherwise, if signed_input is false, the fixed-point range is
--- 
---   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
--- 
--- From this we compute our scaling factor, s:
--- 
---   s = (max_fixed - min_fixed) / (2 * m).
--- 
--- Now we can quantize and dequantize the elements of our tensor.  An element e
--- is transformed into e':
--- 
---   e' = (e * s).round_to_nearest() / s.
--- 
--- Note that we have a different number of buckets in the signed vs. unsigned
--- cases.  For example, if num_bits == 8, we get 254 buckets in the signed case
--- vs. 255 in the unsigned case.
--- 
--- For example, suppose num_bits = 8 and m = 1.  Then
--- 
---   [min_fixed, max_fixed] = [-127, 127], and
---   s = (127 + 127) / 2 = 127.
--- 
--- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
--- {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
-quantizeAndDequantize :: forall v1 t . (TensorType t, OneOf '[Double,
-                                                              Float] t) =>
-                         Tensor v1 t -- ^ __input__: Tensor to quantize and then dequantize.
-                         -> Tensor Value t -- ^ __output__
-quantizeAndDequantize input | eqLengthGuard [] =
-    buildOp (opDef "QuantizeAndDequantize"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  default_value { b: true }
-  description: "If the quantization is signed or unsigned."
-  name: "signed_input"
-  type: "bool"
-}
-attr {
-  default_value { i: 8 }
-  description: "The bitwidth of the quantization."
-  name: "num_bits"
-  type: "int"
-}
-attr {
-  default_value { b: false }
-  description: "If the range is given or should be computed from the tensor."
-  name: "range_given"
-  type: "bool"
-}
-attr {
-  default_value { f: 0.0 }
-  description: "If range is given, this is the min of the range."
-  name: "input_min"
-  type: "float"
-}
-attr {
-  default_value { f: 0.0 }
-  description: "If range is given, this is the max of the range."
-  name: "input_max"
-  type: "float"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Tensor to quantize and then dequantize."
-  name: "input"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
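-
--- The scaling arithmetic above, reproduced in plain Haskell (an
--- illustration of the formula only, not the op's implementation; the
--- exact tie -0.5 * 127 = -63.5 depends on the rounding mode, so
--- standard `round` lands on -64 where the example above shows -63):
---
--- > import Data.Bits (shiftL)
--- >
--- > quantDequant :: Int -> [Double] -> [Double]
--- > quantDequant numBits xs =
--- >     [ fromIntegral (round (e * s) :: Int) / s | e <- xs ]
--- >   where
--- >     m        = maximum (map abs xs)  -- the range_given = false case
--- >     maxFixed = fromIntegral ((1 :: Int) `shiftL` (numBits - 1)) - 1
--- >     s        = (maxFixed - negate maxFixed) / (2 * m)
---
--- Here `quantDequant 8 [-1, -0.5, 0, 0.3]` quantizes to buckets
--- `[-127, -64, 0, 38]` and dequantizes to those values divided by 127.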
-
--- | Returns the next record (key, value pair) produced by a Reader.
---
--- Will dequeue from the input queue if necessary (e.g. when the
--- Reader needs to start reading from a new file since it has finished
--- with the previous file).
-readerRead :: Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
-              -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a Queue, with string work items.
-              -> Build ((Tensor Value Data.ByteString.ByteString,
-                         Tensor Value Data.ByteString.ByteString))
-              -- ^ (__key__, __value__)
-              --
-              -- * __key__: A scalar.
-              --
-              -- * __value__: A scalar.
-readerRead reader_handle queue_handle | eqLengthGuard [] =
-    buildOp (opDef "ReaderRead")
-        reader_handle queue_handle
-{-
-input_arg {
-  description: "Handle to a Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Handle to a Queue, with string work items."
-  is_ref: true
-  name: "queue_handle"
-  type: DT_STRING
-}
-output_arg { description: "A scalar." name: "key" type: DT_STRING }
-output_arg {
-  description: "A scalar." name: "value" type: DT_STRING
-}
--}
-
--- | Solves systems of linear equations with upper or lower triangular matrices by
---
--- backsubstitution.
--- 
--- `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
--- square matrices. If `lower` is `True` then the strictly upper triangular part
--- of each inner-most matrix is assumed to be zero and not accessed.
--- If `lower` is `False` then the strictly lower triangular part of each inner-most
--- matrix is assumed to be zero and not accessed.
--- `rhs` is a tensor of shape `[..., M, K]`.
--- 
--- The output is a tensor of shape `[..., M, K]`. If `adjoint` is
--- `False` then the innermost matrices in `output` satisfy matrix equations
--- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
--- If `adjoint` is `True` then the innermost matrices in
--- `output` satisfy matrix equations
--- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
-matrixTriangularSolve :: forall v1 v2 t . (TensorType t, OneOf '[Double,
-                                                                 Float] t) =>
-                         Tensor v1 t -- ^ __matrix__: Shape is `[..., M, M]`.
-                         -> Tensor v2 t -- ^ __rhs__: Shape is `[..., M, K]`.
-                         -> Tensor Value t -- ^ __output__: Shape is `[..., M, K]`.
-matrixTriangularSolve matrix rhs | eqLengthGuard [] =
-    buildOp (opDef "MatrixTriangularSolve"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        matrix rhs
-{-
-attr {
-  default_value { b: true }
-  description: "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular."
-  name: "lower"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\n         adjoint.\n\n@compatibility(numpy)\nEquivalent to np.linalg.triangular_solve\n@end_compatibility"
-  name: "adjoint"
-  type: "bool"
-}
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Shape is `[..., M, M]`."
-  name: "matrix"
-  type_attr: "T"
-}
-input_arg {
-  description: "Shape is `[..., M, K]`." name: "rhs" type_attr: "T"
-}
-output_arg {
-  description: "Shape is `[..., M, K]`."
-  name: "output"
-  type_attr: "T"
-}
--}
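-
--- The `lower = True`, `adjoint = False` case above is ordinary forward
--- substitution; a pure-Haskell sketch on one `[M, M]` matrix as nested
--- lists (an illustration of the algorithm, not the op):
---
--- > forwardSub :: [[Double]] -> [Double] -> [Double]
--- > forwardSub l b = foldl step [] (zip l b)
--- >   where
--- >     step xs (row, bi) =
--- >       let i = length xs                 -- index of the next unknown
--- >           s = sum (zipWith (*) row xs)  -- known part of the row
--- >       in  xs ++ [(bi - s) / (row !! i)]
---
--- For example, `forwardSub [[2, 0], [1, 3]] [4, 5]` is `[2, 1]`.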
-
--- | Split the data from the input value into TensorArray elements.
---
--- Assuming that `lengths` takes on values
--- 
---   ```(n0, n1, ..., n(T-1))```
--- 
--- and that `value` has shape
--- 
---   ```(n0 + n1 + ... + n(T-1)) x d0 x d1 x ...```,
--- 
--- this splits values into a TensorArray with T tensors.
--- 
--- TensorArray index t will be the subtensor of values with starting position
--- 
---   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
--- 
--- and having size
--- 
---   ```nt x d0 x d1 x ...```
-tensorArraySplitV2 :: forall v1 v2 v3 v4 t . (TensorType t) =>
-                      Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
-                      -> Tensor v2 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
-                      -> Tensor v3 Data.Int.Int64 -- ^ __lengths__: The vector of lengths, how to split the rows of value into the
-                                                  -- TensorArray.
-                      -> Tensor v4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                      -> Tensor Value Float -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
-tensorArraySplitV2 handle value lengths flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArraySplitV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle value lengths flow_in
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The handle to a TensorArray."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The concatenated tensor to write to the TensorArray."
-  name: "value"
-  type_attr: "T"
-}
-input_arg {
-  description: "The vector of lengths, how to split the rows of value into the\nTensorArray."
-  name: "lengths"
-  type: DT_INT64
-}
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_out"
-  type: DT_FLOAT
-}
--}
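-
--- The length-based splitting above, on plain lists (an illustration of
--- the index arithmetic only):
---
--- > splitByLengths :: [Int] -> [a] -> [[a]]
--- > splitByLengths []     _  = []
--- > splitByLengths (n:ns) xs = let (row, rest) = splitAt n xs
--- >                            in  row : splitByLengths ns rest
---
--- For example, `splitByLengths [2, 3] [1 .. 5]` is `[[1, 2], [3, 4, 5]]`;
--- element `t` starts at offset `n0 + ... + n(t-1)`, as described above.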
-
--- | Restores a tensor from checkpoint files.
---
--- Reads a tensor stored in one or several files. If there are several files (for
--- instance because a tensor was saved as slices), `file_pattern` may contain
--- wildcard symbols (`*` and `?`) in the filename portion only, not in the
--- directory portion.
--- 
--- If a `file_pattern` matches several files, `preferred_shard` can be used to hint
--- in which file the requested tensor is likely to be found. This op will first
--- open the file at index `preferred_shard` in the list of matching files and try
--- to restore tensors from that file.  Only if some tensors or tensor slices are
--- not found in that first file does the Op open all the files. Setting
--- `preferred_shard` to match the value passed as the `shard` input
--- of a matching `Save` Op may speed up Restore.  This attribute only affects
--- performance, not correctness.  The default value -1 means files are processed in
--- order.
--- 
--- See also `RestoreSlice`.
-restore :: forall v1 v2 dt . (TensorType dt) =>
-           Tensor v1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
-                                                -- which we read the tensor.
-           -> Tensor v2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
-                                                   -- restored.
-           -> Tensor Value dt -- ^ __tensor__: The restored tensor.
-restore file_pattern tensor_name | eqLengthGuard [] =
-    buildOp (opDef "Restore"
-             & opAttr "dt" .~ tensorType (undefined :: dt))
-        file_pattern tensor_name
-{-
-attr {
-  description: "The type of the tensor to be restored."
-  name: "dt"
-  type: "type"
-}
-attr {
-  default_value { i: -1 }
-  description: "Index of file to open first if multiple files match\n`file_pattern`."
-  name: "preferred_shard"
-  type: "int"
-}
-input_arg {
-  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
-  name: "file_pattern"
-  type: DT_STRING
-}
-input_arg {
-  description: "Must have a single element. The name of the tensor to be\nrestored."
-  name: "tensor_name"
-  type: DT_STRING
-}
-output_arg {
-  description: "The restored tensor." name: "tensor" type_attr: "dt"
-}
--}
-
--- | Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
-
-quantizedReluX :: forall v1 v2 v3 v4 tinput out_type . (TensorType tinput,
-                                                        OneOf '[Data.Int.Int16,
-                                                                Data.Int.Int32,
-                                                                Data.Word.Word16,
-                                                                Data.Word.Word8] tinput,
-                                                        TensorType out_type,
-                                                        OneOf '[Data.Int.Int16,
-                                                                Data.Int.Int32,
-                                                                Data.Word.Word16,
-                                                                Data.Word.Word8] out_type) =>
-                  Tensor v1 tinput -- ^ __features__
-                  -> Tensor v2 Float -- ^ __max_value__
-                  -> Tensor v3 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
-                  -> Tensor v4 Float -- ^ __max_features__: The float value that the highest quantized value represents.
-                  -> (Tensor Value out_type, Tensor Value Float,
-                      Tensor Value Float)
-                  -- ^ (__activations__, __min_activations__, __max_activations__)
-                  --
-                  -- * __activations__: Has the same output shape as "features".
-                  --
-                  -- * __min_activations__: The float value that the lowest quantized value represents.
-                  --
-                  -- * __max_activations__: The float value that the highest quantized value represents.
-quantizedReluX features max_value min_features max_features | eqLengthGuard [] =
-    buildOp (opDef "QuantizedReluX"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        features max_value min_features max_features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "Tinput"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  default_value { type: DT_QUINT8 }
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "Tinput" }
-input_arg { name: "max_value" type: DT_FLOAT }
-input_arg {
-  description: "The float value that the lowest quantized value represents."
-  name: "min_features"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized value represents."
-  name: "max_features"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Has the same output shape as \"features\"."
-  name: "activations"
-  type_attr: "out_type"
-}
-output_arg {
-  description: "The float value that the lowest quantized value represents."
-  name: "min_activations"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized value represents."
-  name: "max_activations"
-  type: DT_FLOAT
-}
--}
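-
--- The clamp being computed, written out over the dequantized (float)
--- domain (a pure illustration of the formula, ignoring quantization):
---
--- > reluX :: (Num a, Ord a) => a -> a -> a
--- > reluX maxValue features = min (max features 0) maxValue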
-
--- | Extracts the average gradient in the given ConditionalAccumulator, provided
---
--- that sufficient (i.e., more than num_required) gradients have been accumulated.
--- The op blocks until sufficient gradients have been accumulated.
--- If the accumulator has already aggregated more than num_required gradients, it
--- returns the average of the accumulated gradients.
--- Also automatically increments the recorded global_step in the accumulator by 1,
--- and resets the aggregate to 0.
-accumulatorTakeGradient :: forall v2 dtype . (TensorType dtype,
-                                              OneOf '[(Data.Complex.Complex Double),
-                                                      (Data.Complex.Complex Float),
-                                                      Data.Int.Int16,
-                                                      Data.Int.Int32,
-                                                      Data.Int.Int64,
-                                                      Data.Int.Int8,
-                                                      Data.Word.Word16,
-                                                      Data.Word.Word8, Double,
-                                                      Float] dtype) =>
-                           Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
-                           -> Tensor v2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate.
-                           -> Build (Tensor Value dtype) -- ^ __average__: The average of the accumulated gradients.
-accumulatorTakeGradient handle num_required | eqLengthGuard [] =
-    buildOp (opDef "AccumulatorTakeGradient"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle num_required
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "The handle to an accumulator."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Number of gradients required before we return an aggregate."
-  name: "num_required"
-  type: DT_INT32
-}
-output_arg {
-  description: "The average of the accumulated gradients."
-  name: "average"
-  type_attr: "dtype"
-}
--}
-
--- | Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
---
--- true, this follows Python semantics in that the result here is consistent
--- with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
--- 
--- *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-floorMod :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                    Data.Int.Int64, Double,
-                                                    Float] t) =>
-            Tensor v1 t -- ^ __x__
-            -> Tensor v2 t -- ^ __y__
-            -> Tensor Value t -- ^ __z__
-floorMod x y | eqLengthGuard [] =
-    buildOp (opDef "FloorMod"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
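-
--- Haskell's `div` and `mod` use the same flooring convention, so the
--- identity above can be checked directly (a pure sketch, not the op):
---
--- > floorModHolds :: Int -> Int -> Bool
--- > floorModHolds x y = y == 0 || (x `div` y) * y + (x `mod` y) == x
--- >
--- > -- e.g. (-7) `div` 3 == -3 and (-7) `mod` 3 == 2, so (-3)*3 + 2 == -7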
-
--- | Returns the set of files matching a pattern.
---
--- Note that this routine only supports wildcard characters in the
--- basename portion of the pattern, not in the directory portion.
-matchingFiles :: Tensor v1 Data.ByteString.ByteString -- ^ __pattern__: A (scalar) shell wildcard pattern.
-                 -> Tensor Value Data.ByteString.ByteString -- ^ __filenames__: A vector of matching filenames.
-matchingFiles pattern | eqLengthGuard [] =
-    buildOp (opDef "MatchingFiles")
-        pattern
-{-
-input_arg {
-  description: "A (scalar) shell wildcard pattern."
-  name: "pattern"
-  type: DT_STRING
-}
-output_arg {
-  description: "A vector of matching filenames."
-  name: "filenames"
-  type: DT_STRING
-}
--}
-
--- | Performs max pooling on the input.
-
-maxPool :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Float] t) =>
-           Tensor v1 t -- ^ __input__: 4-D input to pool over.
-           -> Tensor Value t -- ^ __output__: The max pooled output tensor.
-maxPool input | eqLengthGuard [] =
-    buildOp (opDef "MaxPool"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "The size of the window for each dimension of the input tensor."
-  has_minimum: true
-  minimum: 4
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the\ninput tensor."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { s: "NHWC" s: "NCHW" } }
-  default_value { s: "NHWC" }
-  description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
-  name: "data_format"
-  type: "string"
-}
-input_arg {
-  description: "4-D input to pool over." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "The max pooled output tensor."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the ids of the positions in sampled_candidates that match true_labels.
---
--- When doing log-odds NCE, the result of this op should be passed through a
--- SparseToDense op, then added to the logits of the sampled candidates. This has
--- the effect of 'removing' the sampled labels that match the true labels by
--- making the classifier sure that they are sampled labels.
-computeAccidentalHits :: Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
-                         -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: The true_classes output of UnpackSparseLabels.
-                         -> Tensor v2 Data.Int.Int64 -- ^ __sampled_candidates__: The sampled_candidates output of CandidateSampler.
-                         -> (Tensor Value Data.Int.Int32,
-                             Tensor Value Data.Int.Int64, Tensor Value Float)
-                         -- ^ (__indices__, __ids__, __weights__)
-                         --
-                         -- * __indices__: A vector of indices corresponding to rows of true_candidates.
-                         --
-                         -- * __ids__: A vector of IDs of positions in sampled_candidates that match a true_label
-                         -- for the row with the corresponding index in indices.
-                         --
-                         -- * __weights__: A vector of the same length as indices and ids, in which each element
-                         -- is -FLOAT_MAX.
-computeAccidentalHits num_true true_classes
-                      sampled_candidates | eqLengthGuard [] =
-    buildOp (opDef "ComputeAccidentalHits"
-             & opAttr "num_true" .~ num_true)
-        true_classes sampled_candidates
-{-
-attr {
-  description: "Number of true labels per context."
-  name: "num_true"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "The true_classes output of UnpackSparseLabels."
-  name: "true_classes"
-  type: DT_INT64
-}
-input_arg {
-  description: "The sampled_candidates output of CandidateSampler."
-  name: "sampled_candidates"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of indices corresponding to rows of true_candidates."
-  name: "indices"
-  type: DT_INT32
-}
-output_arg {
-  description: "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices."
-  name: "ids"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX."
-  name: "weights"
-  type: DT_FLOAT
-}
--}
-
--- | Deserialize and concatenate `SparseTensors` from a serialized minibatch.
---
--- The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
--- `N` is the minibatch size and the rows correspond to packed outputs of
--- `SerializeSparse`.  The ranks of the original `SparseTensor` objects
--- must all match.  When the final `SparseTensor` is created, it has rank one
--- higher than the ranks of the incoming `SparseTensor` objects
--- (they have been concatenated along a new row dimension).
--- 
--- The output `SparseTensor` object's shape values for all dimensions but the
--- first are the max across the input `SparseTensor` objects' shape values
--- for the corresponding dimensions.  Its first shape value is `N`, the minibatch
--- size.
--- 
--- The input `SparseTensor` objects' indices are assumed ordered in
--- standard lexicographic order.  If this is not the case, after this
--- step run `SparseReorder` to restore index ordering.
--- 
--- For example, if the serialized input is a `[2 x 3]` matrix representing two
--- original `SparseTensor` objects:
--- 
---     index = [ 0]
---             [10]
---             [20]
---     values = [1, 2, 3]
---     shape = [50]
--- 
--- and
--- 
---     index = [ 2]
---             [10]
---     values = [4, 5]
---     shape = [30]
--- 
--- then the final deserialized `SparseTensor` will be:
--- 
---     index = [0  0]
---             [0 10]
---             [0 20]
---             [1  2]
---             [1 10]
---     values = [1, 2, 3, 4, 5]
---     shape = [2 50]
-deserializeManySparse :: forall v1 dtype . (TensorType dtype) =>
-                         Tensor v1 Data.ByteString.ByteString -- ^ __serialized_sparse__: 2-D, The `N` serialized `SparseTensor` objects.
-                                                              -- Must have 3 columns.
-                         -> (Tensor Value Data.Int.Int64, Tensor Value dtype,
-                             Tensor Value Data.Int.Int64)
-                         -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__)
-                         --
-                         -- * __sparse_indices__
-                         --
-                         -- * __sparse_values__
-                         --
-                         -- * __sparse_shape__
-deserializeManySparse serialized_sparse | eqLengthGuard [] =
-    buildOp (opDef "DeserializeManySparse"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        serialized_sparse
-{-
-attr {
-  description: "The `dtype` of the serialized `SparseTensor` objects."
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns."
-  name: "serialized_sparse"
-  type: DT_STRING
-}
-output_arg { name: "sparse_indices" type: DT_INT64 }
-output_arg { name: "sparse_values" type_attr: "dtype" }
-output_arg { name: "sparse_shape" type: DT_INT64 }
--}
-
--- | Extracts crops from the input image tensor and bilinearly resizes them (possibly
---
--- with aspect ratio change) to a common output size specified by `crop_size`. This
--- is more general than the `crop_to_bounding_box` op which extracts a fixed size
--- slice from the input image and does not allow resizing or aspect ratio change.
--- 
--- Returns a tensor with `crops` from the input `image` at positions defined at the
--- bounding box locations in `boxes`. The cropped boxes are all resized (with
--- bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
--- result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.
-cropAndResize :: forall v1 v2 v3 v4 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                               Data.Int.Int32,
-                                                               Data.Int.Int64,
-                                                               Data.Int.Int8,
-                                                               Data.Word.Word16,
-                                                               Data.Word.Word8,
-                                                               Double,
-                                                               Float] t) =>
-                 Tensor v1 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
-                             -- Both `image_height` and `image_width` need to be positive.
-                 -> Tensor v2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
-                                    -- specifies the coordinates of a box in the `box_ind[i]` image and is specified
-                                    -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
-                                    -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
-                                    -- `[0, 1]` interval of normalized image height is mapped to
-                                    -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
-                                    -- which case the sampled crop is an up-down flipped version of the original
-                                    -- image. The width dimension is treated similarly. Normalized coordinates
-                                    -- outside the `[0, 1]` range are allowed, in which case we use
-                                    -- `extrapolation_value` to extrapolate the input image values.
-                 -> Tensor v3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-                                             -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
-                 -> Tensor v4 Data.Int.Int32 -- ^ __crop_size__: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
-                                             -- cropped image patches are resized to this size. The aspect ratio of the image
-                                             -- content is not preserved. Both `crop_height` and `crop_width` need to be
-                                             -- positive.
-                 -> Tensor Value Float -- ^ __crops__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
-cropAndResize image boxes box_ind crop_size | eqLengthGuard [] =
-    buildOp (opDef "CropAndResize"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        image boxes box_ind crop_size
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "bilinear" } }
-  default_value { s: "bilinear" }
-  description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
-  name: "method"
-  type: "string"
-}
-attr {
-  default_value { f: 0.0 }
-  description: "Value used for extrapolation, when applicable."
-  name: "extrapolation_value"
-  type: "float"
-}
-input_arg {
-  description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
-  name: "image"
-  type_attr: "T"
-}
-input_arg {
-  description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
-  name: "boxes"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
-  name: "box_ind"
-  type: DT_INT32
-}
-input_arg {
-  description: "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive."
-  name: "crop_size"
-  type: DT_INT32
-}
-output_arg {
-  description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
-  name: "crops"
-  type: DT_FLOAT
-}
--}
-
--- | Applies sparse updates to a variable reference.
---
--- This operation computes
--- 
---     # Scalar indices
---     ref[indices, ...] = updates[...]
--- 
---     # Vector indices (for each i)
---     ref[indices[i], ...] = updates[i, ...]
--- 
---     # High rank indices (for each i, ..., j)
---     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
--- 
--- This operation outputs `ref` after the update is done.
--- This makes it easier to chain operations that need to use the reset value.
--- 
--- If values in `ref` are to be updated more than once, because there are
--- duplicate entries in `indices`, the order in which the updates happen
--- for each value is undefined.
--- 
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/ScatterUpdate.png" alt>
--- </div>
-scatterUpdate :: forall v2 v3 t tindices . (TensorType t, TensorType tindices,
-                                            OneOf '[Data.Int.Int32,
-                                                    Data.Int.Int64] tindices) =>
-                 Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
-                 -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
-                 -> Tensor v3 t -- ^ __updates__: A tensor of updated values to store in `ref`.
-                 -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
-                 -- to use the updated values after the update is done.
-scatterUpdate ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterUpdate"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: true }
-  description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A tensor of indices into the first dimension of `ref`."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A tensor of updated values to store in `ref`."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
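-
--- The vector-indices case above, sketched on plain lists (an
--- illustration; here the last write wins, whereas the op leaves the
--- order of duplicate-index updates undefined):
---
--- > scatterUpdateList :: [a] -> [(Int, a)] -> [a]
--- > scatterUpdateList = foldl write
--- >   where write acc (i, v) = take i acc ++ v : drop (i + 1) acc
---
--- For example, `scatterUpdateList "abcd" [(1, 'X'), (3, 'Y')]` is
--- `"aXcY"`.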
-
--- | Outputs random values from the Gamma distribution(s) described by alpha.
---
--- This op uses the algorithm by Marsaglia et al. to acquire samples via
--- transformation-rejection from pairs of uniform and normal random variables.
--- See http://dl.acm.org/citation.cfm?id=358414
-randomGamma :: forall v1 v2 s t . (TensorType s, OneOf '[Data.Int.Int32,
-                                                         Data.Int.Int64] s,
-                                   TensorType t, OneOf '[Data.Word.Word16,
-                                                         Double, Float] t) =>
-               Tensor v1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each
-                           -- distribution described by the shape parameters given in alpha.
-               -> Tensor v2 t -- ^ __alpha__: A tensor in which each scalar is a "shape" parameter describing the
-                              -- associated gamma distribution.
-               -> Build (Tensor Value t) -- ^ __output__: A tensor with shape `shape + shape(alpha)`. Each slice
-               -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
-               -- `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
-randomGamma shape alpha | eqLengthGuard [] =
-    buildOp (opDef "RandomGamma"
-             & opAttr "S" .~ tensorType (undefined :: s)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        shape alpha
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "S"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha."
-  name: "shape"
-  type_attr: "S"
-}
-input_arg {
-  description: "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution."
-  name: "alpha"
-  type_attr: "T"
-}
-output_arg {
-  description: "A tensor with shape `shape + shape(alpha)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | 
-
-batchMatrixSolve :: forall v1 v2 t . (TensorType t, OneOf '[Double, Float] t) =>
-                    Tensor v1 t -- ^ __matrix__
-                    -> Tensor v2 t -- ^ __rhs__
-                    -> Tensor Value t -- ^ __output__
-batchMatrixSolve matrix rhs | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixSolve"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        matrix rhs
-{-
-attr { default_value { b: false } name: "adjoint" type: "bool" }
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "matrix" type_attr: "T" }
-input_arg { name: "rhs" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | 
-
-batchMatrixBandPart :: forall v1 v2 v3 t . (TensorType t) =>
-                       Tensor v1 t -- ^ __input__
-                       -> Tensor v2 Data.Int.Int64 -- ^ __num_lower__
-                       -> Tensor v3 Data.Int.Int64 -- ^ __num_upper__
-                       -> Tensor Value t -- ^ __band__
-batchMatrixBandPart input num_lower num_upper | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixBandPart"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input num_lower num_upper
-{-
-attr { name: "T" type: "type" }
-input_arg { name: "input" type_attr: "T" }
-input_arg { name: "num_lower" type: DT_INT64 }
-input_arg { name: "num_upper" type: DT_INT64 }
-output_arg { name: "band" type_attr: "T" }
--}
-
--- | 
-
-tensorArrayClose :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                    -> Build (ControlNode)
-tensorArrayClose handle | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayClose")
-        handle
-{-
-input_arg { is_ref: true name: "handle" type: DT_STRING }
--}
-
--- | Computes the "logical and" of elements across dimensions of a tensor.
---
--- Reduces `input` along the dimensions given in `reduction_indices`. Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
--- retained with length 1.
-all :: forall v1 v2 tidx . (TensorType tidx, OneOf '[Data.Int.Int32,
-                                                     Data.Int.Int64] tidx) =>
-       Tensor v1 Bool -- ^ __input__: The tensor to reduce.
-       -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
-       -> Tensor Value Bool -- ^ __output__: The reduced tensor.
-all input reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "All"
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to reduce." name: "input" type: DT_BOOL
-}
-input_arg {
-  description: "The dimensions to reduce."
-  name: "reduction_indices"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The reduced tensor." name: "output" type: DT_BOOL
-}
--}
-
--- | Returns the number of records this Reader has produced.
---
--- This is the same as the number of ReaderRead executions that have
--- succeeded.
-readerNumRecordsProduced :: Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
-                            -> Build (Tensor Value Data.Int.Int64) -- ^ __records_produced__
-readerNumRecordsProduced reader_handle | eqLengthGuard [] =
-    buildOp (opDef "ReaderNumRecordsProduced")
-        reader_handle
-{-
-input_arg {
-  description: "Handle to a Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
-output_arg { name: "records_produced" type: DT_INT64 }
--}
-
--- | Pop the element at the top of the stack.
-
-stackPop :: forall elem_type . (TensorType elem_type) =>
-            Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack.
-            -> Build (Tensor Value elem_type) -- ^ __elem__: The tensor that is popped from the top of the stack.
-stackPop handle | eqLengthGuard [] =
-    buildOp (opDef "StackPop"
-             & opAttr "elem_type" .~ tensorType (undefined :: elem_type))
-        handle
-{-
-attr {
-  description: "The type of the elem that is popped."
-  name: "elem_type"
-  type: "type"
-}
-input_arg {
-  description: "The handle to a stack."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "The tensor that is popped from the top of the stack."
-  name: "elem"
-  type_attr: "elem_type"
-}
--}
-
--- | Scatter the data from the input value into specific TensorArray elements.
---
--- `indices` must be a vector, and its length must match the first dim of `value`.
-tensorArrayScatterV2 :: forall v1 v2 v3 v4 t . (TensorType t) =>
-                        Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
-                        -> Tensor v2 Data.Int.Int32 -- ^ __indices__: The locations at which to write the tensor elements.
-                        -> Tensor v3 t -- ^ __value__: The concatenated tensor to write to the TensorArray.
-                        -> Tensor v4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                        -> Tensor Value Float -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
-tensorArrayScatterV2 handle indices value flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayScatterV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle indices value flow_in
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The handle to a TensorArray."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The locations at which to write the tensor elements."
-  name: "indices"
-  type: DT_INT32
-}
-input_arg {
-  description: "The concatenated tensor to write to the TensorArray."
-  name: "value"
-  type_attr: "T"
-}
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_out"
-  type: DT_FLOAT
-}
--}
-
--- | Converts one or more images from RGB to HSV.
---
--- Outputs a tensor of the same shape as the `images` tensor, containing the HSV
--- value of the pixels. The output is only well defined if the values in `images`
--- are in `[0,1]`.
--- 
--- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
--- `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
--- corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
-rGBToHSV :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-            Tensor v1 t -- ^ __images__: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
-            -> Tensor Value t -- ^ __output__: `images` converted to HSV.
-rGBToHSV images | eqLengthGuard [] =
-    buildOp (opDef "RGBToHSV"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "1-D or higher rank. RGB data to convert. Last dimension must be size 3."
-  name: "images"
-  type_attr: "T"
-}
-output_arg {
-  description: "`images` converted to HSV."
-  name: "output"
-  type_attr: "T"
-}
--}
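-
--- The hue/saturation/value mapping above for a single pixel, in plain
--- Haskell (an illustration of the standard formula, not the op):
---
--- > rgbToHsv :: (Double, Double, Double) -> (Double, Double, Double)
--- > rgbToHsv (r, g, b) = (h, s, v)
--- >   where
--- >     v  = maximum [r, g, b]
--- >     c  = v - minimum [r, g, b]            -- chroma
--- >     s  = if v == 0 then 0 else c / v
--- >     h' | c == 0    = 0
--- >        | v == r    = (g - b) / c
--- >        | v == g    = (b - r) / c + 2
--- >        | otherwise = (r - g) / c + 4
--- >     h  = let x = h' / 6 in if x < 0 then x + 1 else x
---
--- Pure red (1, 0, 0) gives hue 0, pure green hue 1/3, and pure blue
--- hue 2/3, matching the description above.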
-
--- | Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.
---
--- The `SparseTensor` must have rank `R` greater than 1, and the first dimension
--- is treated as the minibatch dimension.  Elements of the `SparseTensor`
--- must be sorted in increasing order of this first dimension.  The serialized
--- `SparseTensor` objects going into each row of `serialized_sparse` will have
--- rank `R-1`.
--- 
--- The minibatch size `N` is extracted from `sparse_shape[0]`.
-serializeManySparse :: forall v1 v2 v3 t . (TensorType t) =>
-                       Tensor v1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
-                       -> Tensor v2 t -- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
-                       -> Tensor v3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
-                       -> Tensor Value Data.ByteString.ByteString -- ^ __serialized_sparse__
-serializeManySparse sparse_indices sparse_values
-                    sparse_shape | eqLengthGuard [] =
-    buildOp (opDef "SerializeManySparse"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sparse_indices sparse_values sparse_shape
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "2-D.  The `indices` of the minibatch `SparseTensor`."
-  name: "sparse_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  The `values` of the minibatch `SparseTensor`."
-  name: "sparse_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the minibatch `SparseTensor`."
-  name: "sparse_shape"
-  type: DT_INT64
-}
-output_arg { name: "serialized_sparse" type: DT_STRING }
--}
-
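--- A hedged sketch of the shape contract (`constant` and `Shape` are assumed
--- from TensorFlow.Ops / TensorFlow.Types): a minibatch of `N = 2` rank-2
--- `SparseTensor`s serializes to a `[2, 3]` string tensor, one rank-1
--- `SparseTensor` per row.
---
--- ```
--- serialized :: Tensor Value Data.ByteString.ByteString
--- serialized = serializeManySparse indices values shape
---   where
---     -- Two nonzeros: element 0 of row 0 and element 1 of row 1.
---     indices = constant (Shape [2, 2]) [0, 0, 1, 1 :: Data.Int.Int64]
---     values  = constant (Shape [2]) [10, 20 :: Float]
---     shape   = constant (Shape [2]) [2, 4 :: Data.Int.Int64]  -- N = 2
--- ```
-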
--- | Initializes a table from a text file.
---
--- It inserts one key-value pair into the table for each line of the file.
--- The key and value are extracted from the whole line content, from elements
--- of the split line based on `delimiter`, or from the line number (starting
--- from zero).
--- Where to extract the key and value from a line is specified by `key_index` and
--- `value_index`.
--- 
--- - A value of -1 means use the line number (starting from zero); expects `int64`.
--- - A value of -2 means use the whole line content; expects `string`.
--- - A value >= 0 means use the index (starting at zero) of the split line based
---   on `delimiter`.
-initializeTableFromTextFile :: Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from.
-                               -> Data.Int.Int64 -- ^ __value_index__: Column index in a line to get the table
-                                                 -- `value` values from.
-                               -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
-                               -> Tensor v2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file.
-                               -> Build (ControlNode)
-initializeTableFromTextFile key_index value_index table_handle
-                            filename | eqLengthGuard [] =
-    buildOp (opDef "InitializeTableFromTextFile"
-             & opAttr "key_index" .~ key_index
-             & opAttr "value_index" .~ value_index)
-        table_handle filename
-{-
-attr {
-  description: "Column index in a line to get the table `key` values from."
-  has_minimum: true
-  minimum: -2
-  name: "key_index"
-  type: "int"
-}
-attr {
-  description: "Column index that represents information of a line to get the table\n`value` values from."
-  has_minimum: true
-  minimum: -2
-  name: "value_index"
-  type: "int"
-}
-attr {
-  default_value { i: -1 }
-  description: "Number of elements of the file, use -1 if unknown."
-  has_minimum: true
-  minimum: -1
-  name: "vocab_size"
-  type: "int"
-}
-attr {
-  default_value { s: "\t" }
-  description: "Delimiter to separate fields in a line."
-  name: "delimiter"
-  type: "string"
-}
-input_arg {
-  description: "Handle to a table which will be initialized."
-  is_ref: true
-  name: "table_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Filename of a vocabulary text file."
-  name: "filename"
-  type: DT_STRING
-}
--}
-
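--- A hedged usage sketch of the index conventions above. The vocabulary path
--- is illustrative only; `constant` and `Shape` are assumptions from
--- TensorFlow.Ops / TensorFlow.Types, and the string literal assumes
--- OverloadedStrings.
---
--- ```
--- initVocab :: Tensor Ref Data.ByteString.ByteString -> Build ControlNode
--- initVocab tableHandle =
---     -- key_index 0: the first field of the split line;
---     -- value_index -1: the line number, as int64.
---     initializeTableFromTextFile 0 (-1) tableHandle
---         (constant (Shape []) ["vocab.txt"])
--- ```
-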
--- | Decode a PNG-encoded image to a uint8 or uint16 tensor.
---
--- The attr `channels` indicates the desired number of color channels for the
--- decoded image.
--- 
--- Accepted values are:
--- 
--- *   0: Use the number of channels in the PNG-encoded image.
--- *   1: output a grayscale image.
--- *   3: output an RGB image.
--- *   4: output an RGBA image.
--- 
--- If needed, the PNG-encoded image is transformed to match the requested number
--- of color channels.
-decodePng :: forall v1 dtype . (TensorType dtype, OneOf '[Data.Word.Word16,
-                                                          Data.Word.Word8] dtype) =>
-             Tensor v1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The PNG-encoded image.
-             -> Tensor Value dtype -- ^ __image__: 3-D with shape `[height, width, channels]`.
-decodePng contents | eqLengthGuard [] =
-    buildOp (opDef "DecodePng"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        contents
-{-
-attr {
-  default_value { i: 0 }
-  description: "Number of color channels for the decoded image."
-  name: "channels"
-  type: "int"
-}
-attr {
-  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
-  default_value { type: DT_UINT8 }
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "0-D.  The PNG-encoded image."
-  name: "contents"
-  type: DT_STRING
-}
-output_arg {
-  description: "3-D with shape `[height, width, channels]`."
-  name: "image"
-  type_attr: "dtype"
-}
--}
-
--- | Get the current size of the TensorArray.
-
-tensorArraySizeV2 :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
-                     -> Tensor v2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                     -> Tensor Value Data.Int.Int32 -- ^ __size__: The current size of the TensorArray.
-tensorArraySizeV2 handle flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArraySizeV2")
-        handle flow_in
-{-
-input_arg {
-  description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The current size of the TensorArray."
-  name: "size"
-  type: DT_INT32
-}
--}
-
--- | Returns x / y element-wise.
---
--- *NOTE*: `Div` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-div :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.Int.Int16, Data.Int.Int32,
-                                               Data.Int.Int64, Data.Int.Int8,
-                                               Data.Word.Word16,
-                                               Data.Word.Word8, Double,
-                                               Float] t) =>
-       Tensor v1 t -- ^ __x__
-       -> Tensor v2 t -- ^ __y__
-       -> Tensor Value t -- ^ __z__
-div x y | eqLengthGuard [] =
-    buildOp (opDef "Div"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
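--- A broadcasting sketch (`constant` and `Shape` are assumptions from
--- TensorFlow.Ops / TensorFlow.Types; note that this module's `div` shadows
--- `Prelude.div` when imported unqualified): dividing a `[2, 2]` matrix by a
--- scalar divides every element.
---
--- ```
--- halved :: Tensor Value Float
--- halved = TensorFlow.GenOps.Core.div
---              (constant (Shape [2, 2]) [2, 4, 6, 8 :: Float])
---              (constant (Shape []) [2 :: Float])
--- -- result: [[1, 2], [3, 4]]
--- ```
-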
--- | Generates labels for candidate sampling with a log-uniform distribution.
---
--- See explanations of candidate sampling and the data formats at
--- go/candidate-sampling.
--- 
--- For each batch, this op picks a single set of sampled candidate labels.
--- 
--- The advantages of sampling candidates per-batch are simplicity and the
--- possibility of efficient dense matrix multiplication. The disadvantage is that
--- the sampled candidates must be chosen independently of the context and of the
--- true labels.
-logUniformCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
-                              -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
-                              -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
-                              -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
-                                      -- candidates in a batch are unique. This requires some approximation to
-                                      -- estimate the post-rejection sampling probabilities.
-                              -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
-                                                          -- IDs of the num_true target_classes in the corresponding original label.
-                              -> (Tensor Value Data.Int.Int64,
-                                  Tensor Value Float, Tensor Value Float)
-                              -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
-                              --
-                              -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
-                              -- the ID of a sampled candidate.
-                              --
-                              -- * __true_expected_count__: A batch_size * num_true matrix, representing
-                              -- the number of times each candidate is expected to occur in a batch
-                              -- of sampled candidates. If unique=true, then this is a probability.
-                              --
-                              -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
-                              -- candidate representing the number of times the candidate is expected
-                              -- to occur in a batch of sampled candidates.  If unique=true, then this is a
-                              -- probability.
-logUniformCandidateSampler num_sampled num_true range_max unique
-                           true_classes | eqLengthGuard [] =
-    buildOp (opDef "LogUniformCandidateSampler"
-             & opAttr "num_sampled" .~ num_sampled
-             & opAttr "num_true" .~ num_true
-             & opAttr "range_max" .~ range_max
-             & opAttr "unique" .~ unique)
-        true_classes
-{-
-attr {
-  description: "Number of true labels per context."
-  has_minimum: true
-  minimum: 1
-  name: "num_true"
-  type: "int"
-}
-attr {
-  description: "Number of candidates to randomly sample per batch."
-  has_minimum: true
-  minimum: 1
-  name: "num_sampled"
-  type: "int"
-}
-attr {
-  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
-  name: "unique"
-  type: "bool"
-}
-attr {
-  description: "The sampler will sample integers from the interval [0, range_max)."
-  has_minimum: true
-  minimum: 1
-  name: "range_max"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
-  name: "true_classes"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
-  name: "sampled_candidates"
-  type: DT_INT64
-}
-output_arg {
-  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
-  name: "true_expected_count"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
-  name: "sampled_expected_count"
-  type: DT_FLOAT
-}
--}
-
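--- A minimal call sketch (`constant` and `Shape` are assumptions from
--- TensorFlow.Ops / TensorFlow.Types): sample 5 candidate IDs per batch from
--- `[0, 1000)`, with one true label per example.
---
--- ```
--- sampled :: (Tensor Value Data.Int.Int64,
---             Tensor Value Float, Tensor Value Float)
--- sampled = logUniformCandidateSampler 5 1 1000 True trueClasses
---   where
---     -- batch_size = 2, num_true = 1
---     trueClasses = constant (Shape [2, 1]) [42, 7 :: Data.Int.Int64]
--- ```
-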
--- | Defines a barrier that persists across different graph executions.
---
--- A barrier represents a key-value map, where each key is a string, and
--- each value is a tuple of tensors.
--- 
--- At runtime, the barrier contains 'complete' and 'incomplete'
--- elements. A complete element has defined tensors for all components of
--- its value tuple, and may be accessed using BarrierTakeMany. An
--- incomplete element has some undefined components in its value tuple,
--- and may be updated using BarrierInsertMany.
-barrier :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the barrier.
-barrier  | eqLengthGuard [] =
-    buildOp (opDef "Barrier")
-        
-{-
-attr {
-  description: "The type of each component in a value."
-  has_minimum: true
-  minimum: 1
-  name: "component_types"
-  type: "list(type)"
-}
-attr {
-  default_value { list { } }
-  description: "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types."
-  has_minimum: true
-  name: "shapes"
-  type: "list(shape)"
-}
-attr {
-  default_value { i: -1 }
-  description: "The capacity of the barrier.  The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue."
-  name: "capacity"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this barrier will be shared under the given name\nacross multiple sessions."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to the barrier."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Creates a variable resource.
-
-createVariableOp :: forall v2 dtype . (TensorType dtype) =>
-                    ResourceHandle dtype -- ^ __resource__: handle to the resource in which to store the variable.
-                    -> Tensor v2 dtype -- ^ __value__: the value to set the new tensor to use.
-                    -> Build (ControlNode)
-createVariableOp resource value | eqLengthGuard [] =
-    buildOp (opDef "CreateVariableOp"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        resource value
-{-
-attr {
-  description: "the dtype of the value." name: "dtype" type: "type"
-}
-input_arg {
-  description: "handle to the resource in which to store the variable."
-  name: "resource"
-  type: DT_RESOURCE
-}
-input_arg {
-  description: "the value to set the new tensor to use."
-  name: "value"
-  type_attr: "dtype"
-}
--}
-
--- | Applies a gradient to a given accumulator.
---
--- Does not add if `local_step` is less than the accumulator's `global_step`.
-accumulatorApplyGradient :: forall v2 v3 dtype . (TensorType dtype,
-                                                  OneOf '[(Data.Complex.Complex Double),
-                                                          (Data.Complex.Complex Float),
-                                                          Data.Int.Int16,
-                                                          Data.Int.Int32,
-                                                          Data.Int.Int64,
-                                                          Data.Int.Int8,
-                                                          Data.Word.Word16,
-                                                          Data.Word.Word8,
-                                                          Double,
-                                                          Float] dtype) =>
-                            Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
-                            -> Tensor v2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the gradient was computed.
-                            -> Tensor v3 dtype -- ^ __gradient__: A tensor of the gradient to be accumulated.
-                            -> Build (ControlNode)
-accumulatorApplyGradient handle local_step gradient | eqLengthGuard [] =
-    buildOp (opDef "AccumulatorApplyGradient"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle local_step gradient
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "The handle to a accumulator."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The local_step value at which the gradient was computed."
-  name: "local_step"
-  type: DT_INT64
-}
-input_arg {
-  description: "A tensor of the gradient to be accumulated."
-  name: "gradient"
-  type_attr: "dtype"
-}
--}
-
--- | Outputs random values from a normal distribution.
---
--- The generated values will have mean 0 and standard deviation 1.
-randomStandardNormal :: forall v1 dtype t . (TensorType dtype,
-                                             OneOf '[Data.Word.Word16, Double,
-                                                     Float] dtype, TensorType t,
-                                             OneOf '[Data.Int.Int32,
-                                                     Data.Int.Int64] t) =>
-                        Tensor v1 t -- ^ __shape__: The shape of the output tensor.
-                        -> Build (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random normal values.
-randomStandardNormal shape | eqLengthGuard [] =
-    buildOp (opDef "RandomStandardNormal"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        shape
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  description: "The type of the output."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The shape of the output tensor."
-  name: "shape"
-  type_attr: "T"
-}
-output_arg {
-  description: "A tensor of the specified shape filled with random normal values."
-  name: "output"
-  type_attr: "dtype"
-}
--}
-
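--- A minimal sketch (`constant` and `Shape` are assumptions from
--- TensorFlow.Ops / TensorFlow.Types): draw a `[2, 3]` tensor of N(0, 1)
--- samples inside the `Build` monad.
---
--- ```
--- normals :: Build (Tensor Value Float)
--- normals = randomStandardNormal (constant (Shape [2]) [2, 3 :: Data.Int.Int32])
--- ```
-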
--- | Outputs random values from a normal distribution.
---
--- The parameters may each be a scalar, which applies to the entire output, or
--- a vector of length `shape[0]`, which stores the parameters for each batch.
-parameterizedTruncatedNormal :: forall v1 v2 v3 v4 v5 dtype
-                                t . (TensorType dtype, OneOf '[Data.Word.Word16,
-                                                               Double,
-                                                               Float] dtype,
-                                     TensorType t, OneOf '[Data.Int.Int32,
-                                                           Data.Int.Int64] t) =>
-                                Tensor v1 t -- ^ __shape__: The shape of the output tensor. Batches are indexed by the 0th dimension.
-                                -> Tensor v2 dtype -- ^ __means__: The mean parameter of each batch.
-                                -> Tensor v3 dtype -- ^ __stdevs__: The standard deviation parameter of each batch. Must be greater than 0.
-                                -> Tensor v4 dtype -- ^ __minvals__: The minimum cutoff. May be -infinity.
-                                -> Tensor v5 dtype -- ^ __maxvals__: The maximum cutoff. May be +infinity, and must be more than the minval
-                                                   -- for each batch.
-                                -> Build (Tensor Value dtype) -- ^ __output__: A matrix of shape num_batches x samples_per_batch, filled with random
-                                -- truncated normal values using the parameters for each row.
-parameterizedTruncatedNormal shape means stdevs minvals
-                             maxvals | eqLengthGuard [] =
-    buildOp (opDef "ParameterizedTruncatedNormal"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        shape means stdevs minvals maxvals
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  description: "The type of the output."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The shape of the output tensor. Batches are indexed by the 0th dimension."
-  name: "shape"
-  type_attr: "T"
-}
-input_arg {
-  description: "The mean parameter of each batch."
-  name: "means"
-  type_attr: "dtype"
-}
-input_arg {
-  description: "The standard deviation parameter of each batch. Must be greater than 0."
-  name: "stdevs"
-  type_attr: "dtype"
-}
-input_arg {
-  description: "The minimum cutoff. May be -infinity."
-  name: "minvals"
-  type_attr: "dtype"
-}
-input_arg {
-  description: "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch."
-  name: "maxvals"
-  type_attr: "dtype"
-}
-output_arg {
-  description: "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row."
-  name: "output"
-  type_attr: "dtype"
-}
--}
-
--- | Updates the accumulator with a new value for global_step.
---
--- Logs a warning if the accumulator's value is already higher than
--- `new_global_step`.
-accumulatorSetGlobalStep :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
-                            -> Tensor v2 Data.Int.Int64 -- ^ __new_global_step__: The new global_step value to set.
-                            -> Build (ControlNode)
-accumulatorSetGlobalStep handle new_global_step | eqLengthGuard [] =
-    buildOp (opDef "AccumulatorSetGlobalStep")
-        handle new_global_step
-{-
-input_arg {
-  description: "The handle to an accumulator."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The new global_step value to set."
-  name: "new_global_step"
-  type: DT_INT64
-}
--}
-
--- | Resize `images` to `size` using bilinear interpolation.
---
--- Input images can be of different types but output images are always float.
-resizeBilinear :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                          Data.Int.Int32,
-                                                          Data.Int.Int64,
-                                                          Data.Int.Int8,
-                                                          Data.Word.Word16,
-                                                          Data.Word.Word8,
-                                                          Double, Float] t) =>
-                  Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
-                  -> Tensor v2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
-                                              -- new size for the images.
-                  -> Tensor Value Float -- ^ __resized_images__: 4-D with shape
-                  -- `[batch, new_height, new_width, channels]`.
-resizeBilinear images size | eqLengthGuard [] =
-    buildOp (opDef "ResizeBilinear"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images size
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
-  name: "align_corners"
-  type: "bool"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "images"
-  type_attr: "T"
-}
-input_arg {
-  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
-  name: "size"
-  type: DT_INT32
-}
-output_arg {
-  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
-  name: "resized_images"
-  type: DT_FLOAT
-}
--}
-
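--- As a worked example of the `align_corners` attr described above: resizing
--- from height 4 to height 8 rescales by 8 / 4 = 2.0 when
--- `align_corners = false`, but by (8 - 1) / (4 - 1) = 7/3 when
--- `align_corners = true`, which maps the corner rows of the input exactly
--- onto the corner rows of the output.
-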
--- | Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
---
--- [min_range, max_range] are scalar floats that specify the range for
--- the 'input' data. The 'mode' attribute controls exactly which calculations are
--- used to convert the float values to their quantized equivalents.
--- 
--- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
--- 
--- ```
--- out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
--- if T == qint8, out[i] -= (range(T) + 1) / 2.0
--- ```
--- here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
--- 
--- *MIN_COMBINED Mode Example*
--- 
--- Assume the input is type float and has a possible range of [0.0, 6.0] and the
--- output type is quint8 ([0, 255]). The min_range and max_range values should be
--- specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
--- value of the input by 255/6 and cast to quint8.
--- 
--- If the output type is qint8 ([-128, 127]), the operation will additionally
--- subtract 128 from each value prior to casting, so that the range of values
--- aligns with the range of qint8.
--- 
--- If the mode is 'MIN_FIRST', then this approach is used:
--- 
--- ```
--- number_of_steps = 1 << (# of bits in T)
--- range_adjust = number_of_steps / (number_of_steps - 1)
--- range = (range_max - range_min) * range_adjust
--- range_scale = number_of_steps / range
--- quantized = round(input * range_scale) - round(range_min * range_scale) +
---   numeric_limits<T>::min()
--- quantized = max(quantized, numeric_limits<T>::min())
--- quantized = min(quantized, numeric_limits<T>::max())
--- ```
--- 
--- The biggest difference between this and MIN_COMBINED is that the minimum range
--- is rounded first, before it's subtracted from the rounded value. With
--- MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
--- and dequantizing will introduce a larger and larger error.
--- 
--- One thing to watch out for is that the operator may choose to adjust the
--- requested minimum and maximum values slightly during the quantization process,
--- so you should always use the output ports as the range for further calculations.
--- For example, if the requested minimum and maximum values are close to equal,
--- they will be separated by a small epsilon value to prevent ill-formed quantized
--- buffers from being created. Otherwise, you can end up with buffers where all the
--- quantized values map to the same float value, which causes problems for
--- operations that have to perform further calculations on them.
-quantizeV2 :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8] t) =>
-              Tensor v1 Float -- ^ __input__
-              -> Tensor v2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input.
-              -> Tensor v3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input.
-              -> (Tensor Value t, Tensor Value Float, Tensor Value Float)
-              -- ^ (__output__, __output_min__, __output_max__)
-              --
-              -- * __output__: The quantized data produced from the float input.
-              --
-              -- * __output_min__: The actual minimum scalar value used for the output.
-              --
-              -- * __output_max__: The actual maximum scalar value used for the output.
-quantizeV2 input min_range max_range | eqLengthGuard [] =
-    buildOp (opDef "QuantizeV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input min_range max_range
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } }
-  default_value { s: "MIN_COMBINED" }
-  name: "mode"
-  type: "string"
-}
-input_arg { name: "input" type: DT_FLOAT }
-input_arg {
-  description: "The minimum scalar value possibly produced for the input."
-  name: "min_range"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The maximum scalar value possibly produced for the input."
-  name: "max_range"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The quantized data produced from the float input."
-  name: "output"
-  type_attr: "T"
-}
-output_arg {
-  description: "The actual minimum scalar value used for the output."
-  name: "output_min"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The actual maximum scalar value used for the output."
-  name: "output_max"
-  type: DT_FLOAT
-}
--}
-
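--- A worked MIN_COMBINED example as plain Haskell arithmetic (this just
--- mirrors the formula above; it is not a graph op):
---
--- ```
--- -- Quantizing 3.0 from [0.0, 6.0] to quint8, where range(T) = 255:
--- --   (3.0 - 0.0) * 255 / (6.0 - 0.0) = 127.5, then cast to quint8.
--- minCombined :: Float -> Float -> Float -> Float
--- minCombined minRange maxRange x = (x - minRange) * 255 / (maxRange - minRange)
--- ```
-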
--- | Decode a JPEG-encoded image to a uint8 tensor.
---
--- The attr `channels` indicates the desired number of color channels for the
--- decoded image.
--- 
--- Accepted values are:
--- 
--- *   0: Use the number of channels in the JPEG-encoded image.
--- *   1: output a grayscale image.
--- *   3: output an RGB image.
--- 
--- If needed, the JPEG-encoded image is transformed to match the requested number
--- of color channels.
--- 
--- The attr `ratio` allows downscaling the image by an integer factor during
--- decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
--- downscaling the image later.
-decodeJpeg :: Tensor v1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The JPEG-encoded image.
-              -> Tensor Value Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`.
-decodeJpeg contents | eqLengthGuard [] =
-    buildOp (opDef "DecodeJpeg")
-        contents
-{-
-attr {
-  default_value { i: 0 }
-  description: "Number of color channels for the decoded image."
-  name: "channels"
-  type: "int"
-}
-attr {
-  default_value { i: 1 }
-  description: "Downscaling ratio."
-  name: "ratio"
-  type: "int"
-}
-attr {
-  default_value { b: true }
-  description: "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only)."
-  name: "fancy_upscaling"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If true try to recover an image from truncated input."
-  name: "try_recover_truncated"
-  type: "bool"
-}
-attr {
-  default_value { f: 1.0 }
-  description: "The minimum required fraction of lines before a truncated\ninput is accepted."
-  name: "acceptable_fraction"
-  type: "float"
-}
-input_arg {
-  description: "0-D.  The JPEG-encoded image."
-  name: "contents"
-  type: DT_STRING
-}
-output_arg {
-  description: "3-D with shape `[height, width, channels]`.."
-  name: "image"
-  type: DT_UINT8
-}
--}
-
--- | Computes the power of one value to another.
---
--- Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
--- corresponding elements in `x` and `y`. For example:
--- 
--- ```
--- # tensor 'x' is [[2, 2], [3, 3]]
--- # tensor 'y' is [[8, 16], [2, 3]]
--- tf.pow(x, y) ==> [[256, 65536], [9, 27]]
--- ```
-pow :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.Int.Int32, Data.Int.Int64,
-                                               Data.Word.Word16, Double,
-                                               Float] t) =>
-       Tensor v1 t -- ^ __x__
-       -> Tensor v2 t -- ^ __y__
-       -> Tensor Value t -- ^ __z__
-pow x y | eqLengthGuard [] =
-    buildOp (opDef "Pow"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Forwards the input to the output.
---
--- This operator represents the loop termination condition used by the
--- "pivot" switches of a loop.
-loopCond :: Tensor v1 Bool -- ^ __input__: A boolean scalar, representing the branch predicate of the Switch op.
-            -> Tensor Value Bool -- ^ __output__: The same tensor as `input`.
-loopCond input | eqLengthGuard [] =
-    buildOp (opDef "LoopCond")
-        input
-{-
-input_arg {
-  description: "A boolean scalar, representing the branch predicate of the Switch op."
-  name: "input"
-  type: DT_BOOL
-}
-output_arg {
-  description: "The same tensor as `input`."
-  name: "output"
-  type: DT_BOOL
-}
--}
-
--- | Reads and outputs the entire contents of the input filename.
-
-readFile :: Tensor v1 Data.ByteString.ByteString -- ^ __filename__
-            -> Tensor Value Data.ByteString.ByteString -- ^ __contents__
-readFile filename | eqLengthGuard [] =
-    buildOp (opDef "ReadFile")
-        filename
-{-
-input_arg { name: "filename" type: DT_STRING }
-output_arg { name: "contents" type: DT_STRING }
--}
-
--- | Returns the imaginary part of a complex number.
---
--- Given a tensor `input` of complex numbers, this operation returns a tensor of
--- type `float` that is the imaginary part of each element in `input`. All
--- elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
--- is the real part and *b* is the imaginary part returned by this operation.
--- 
--- For example:
--- 
--- ```
--- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
--- tf.imag(input) ==> [4.75, 5.75]
--- ```
-imag :: forall v1 t tout . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float)] t,
-                            TensorType tout, OneOf '[Double, Float] tout) =>
-        Tensor v1 t -- ^ __input__
-        -> Tensor Value tout -- ^ __output__
-imag input | eqLengthGuard [] =
-    buildOp (opDef "Imag"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tout" .~ tensorType (undefined :: tout))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } }
-  default_value { type: DT_COMPLEX64 }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  default_value { type: DT_FLOAT }
-  name: "Tout"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "Tout" }
--}
-
--- | 
-
-tensorArrayGrad :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__
-                   -> Tensor v2 Float -- ^ __flow_in__
-                   -> Build (Tensor Ref Data.ByteString.ByteString) -- ^ __grad_handle__
-tensorArrayGrad handle flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayGrad")
-        handle flow_in
-{-
-attr { name: "source" type: "string" }
-input_arg { name: "handle" type: DT_STRING }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { is_ref: true name: "grad_handle" type: DT_STRING }
--}
-
--- | Outputs a `Summary` protocol buffer with a histogram.
---
--- The generated
--- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
--- has one summary value containing a histogram for `values`.
--- 
--- This op reports an `InvalidArgument` error if any value is not finite.
-histogramSummary :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Data.Int.Int8,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8,
-                                                            Double, Float] t) =>
-                    Tensor v1 Data.ByteString.ByteString -- ^ __tag__: Scalar.  Tag to use for the `Summary.Value`.
-                    -> Tensor v2 t -- ^ __values__: Any shape. Values to use to build the histogram.
-                    -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
-histogramSummary tag values | eqLengthGuard [] =
-    buildOp (opDef "HistogramSummary"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        tag values
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Scalar.  Tag to use for the `Summary.Value`."
-  name: "tag"
-  type: DT_STRING
-}
-input_arg {
-  description: "Any shape. Values to use to build the histogram."
-  name: "values"
-  type_attr: "T"
-}
-output_arg {
-  description: "Scalar. Serialized `Summary` protocol buffer."
-  name: "summary"
-  type: DT_STRING
-}
--}
-
--- | Computes the gradients of 3-D convolution with respect to the input.
-
-conv3DBackpropInputV2 :: forall v1 v2 v3 t . (TensorType t,
-                                              OneOf '[(Data.Complex.Complex Double),
-                                                      (Data.Complex.Complex Float),
-                                                      Data.Int.Int16,
-                                                      Data.Int.Int32,
-                                                      Data.Int.Int64,
-                                                      Data.Int.Int8,
-                                                      Data.Word.Word16,
-                                                      Data.Word.Word8, Double,
-                                                      Float] t) =>
-                         Tensor v1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the tensor shape of `input`,
-                                                  -- where `input` is a 5-D
-                                                  -- `[batch, depth, rows, cols, in_channels]` tensor.
-                         -> Tensor v2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
-                                        -- `in_channels` must match between `input` and `filter`.
-                         -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-                                        -- out_channels]`.
-                         -> Tensor Value t -- ^ __output__
-conv3DBackpropInputV2 input_sizes filter out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Conv3DBackpropInputV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input_sizes filter out_backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor."
-  name: "input_sizes"
-  type: DT_INT32
-}
-input_arg {
-  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
-  name: "filter"
-  type_attr: "T"
-}
-input_arg {
-  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Computes the gradient of bilinear interpolation.
-
-resizeBilinearGrad :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16,
-                                                              Double,
-                                                              Float] t) =>
-                      Tensor v1 Float -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
-                      -> Tensor v2 t -- ^ __original_image__: 4-D with shape `[batch, orig_height, orig_width, channels]`,
-                                     -- The image tensor that was resized.
-                      -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`.
-                      -- Gradients with respect to the input image. Input image must have been
-                      -- float or double.
-resizeBilinearGrad grads original_image | eqLengthGuard [] =
-    buildOp (opDef "ResizeBilinearGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        grads original_image
-{-
-attr {
-  allowed_values {
-    list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
-  name: "align_corners"
-  type: "bool"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "grads"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized."
-  name: "original_image"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
---
--- A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
--- `sparse_values`, and `sparse_shape`, where
--- 
--- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
--- 
--- An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
--- having a first `sparse_indices` column taking values between `[0, N)`, where
--- the minibatch size `N == sparse_shape[0]`.
--- 
--- The input `SparseTensor` must have rank `R` greater than 1, and the first
--- dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
--- must be sorted in increasing order of this first dimension.  The stored
--- `SparseTensor` objects pointed to by each row of the output `sparse_handles`
--- will have rank `R-1`.
--- 
--- The `SparseTensor` values can then be read out as part of a minibatch by passing
--- the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
--- the correct `SparseTensorsMap` is accessed, ensure that the same
--- `container` and `shared_name` are passed to that Op.  If no `shared_name`
--- is provided here, instead use the *name* of the Operation created by calling
--- `AddManySparseToTensorsMap` as the `shared_name` passed to
--- `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
-addManySparseToTensorsMap :: forall v1 v2 v3 t . (TensorType t) =>
-                             Tensor v1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D.  The `indices` of the minibatch `SparseTensor`.
-                                                      -- `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
-                             -> Tensor v2 t -- ^ __sparse_values__: 1-D.  The `values` of the minibatch `SparseTensor`.
-                             -> Tensor v3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D.  The `shape` of the minibatch `SparseTensor`.
-                                                         -- The minibatch size `N == sparse_shape[0]`.
-                             -> Build (Tensor Value Data.Int.Int64) -- ^ __sparse_handles__: 1-D.  The handles of the `SparseTensor` now stored in the
-                             -- `SparseTensorsMap`.  Shape: `[N]`.
-addManySparseToTensorsMap sparse_indices sparse_values
-                          sparse_shape | eqLengthGuard [] =
-    buildOp (opDef "AddManySparseToTensorsMap"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sparse_indices sparse_values sparse_shape
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { s: "" }
-  description: "The container name for the `SparseTensorsMap` created by this op."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
-  name: "shared_name"
-  type: "string"
-}
-input_arg {
-  description: "2-D.  The `indices` of the minibatch `SparseTensor`.\n`sparse_indices[:, 0]` must be ordered values in `[0, N)`."
-  name: "sparse_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  The `values` of the minibatch `SparseTensor`."
-  name: "sparse_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  The `shape` of the minibatch `SparseTensor`.\nThe minibatch size `N == sparse_shape[0]`."
-  name: "sparse_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  The handles of the `SparseTensor` now stored in the\n`SparseTensorsMap`.  Shape: `[N]`."
-  name: "sparse_handles"
-  type: DT_INT64
-}
--}
-
--- | 
-
-batchIFFT :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
-             -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
-batchIFFT input | eqLengthGuard [] =
-    buildOp (opDef "BatchIFFT")
-        input
-{-
-input_arg { name: "input" type: DT_COMPLEX64 }
-output_arg { name: "output" type: DT_COMPLEX64 }
--}
-
--- | 
-
-batchMatrixDeterminant :: forall v1 t . (TensorType t, OneOf '[Double,
-                                                               Float] t) =>
-                          Tensor v1 t -- ^ __input__
-                          -> Tensor Value t -- ^ __output__
-batchMatrixDeterminant input | eqLengthGuard [] =
-    buildOp (opDef "BatchMatrixDeterminant"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Delete the tensor specified by its handle in the session.
-
-deleteSessionTensor :: Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state.
-                       -> ControlNode
-deleteSessionTensor handle | eqLengthGuard [] =
-    buildOp (opDef "DeleteSessionTensor")
-        handle
-{-
-input_arg {
-  description: "The handle for a tensor stored in the session state."
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Computes the number of elements in the given table.
-
-lookupTableSize :: Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
-                   -> Build (Tensor Value Data.Int.Int64) -- ^ __size__: Scalar that contains number of elements in the table.
-lookupTableSize table_handle | eqLengthGuard [] =
-    buildOp (opDef "LookupTableSize")
-        table_handle
-{-
-input_arg {
-  description: "Handle to the table."
-  is_ref: true
-  name: "table_handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "Scalar that contains number of elements in the table."
-  name: "size"
-  type: DT_INT64
-}
--}
-
--- | Computes rectified linear: `max(features, 0)`.
-
-relu :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                             Data.Int.Int64, Data.Int.Int8,
-                                             Data.Word.Word16, Data.Word.Word8,
-                                             Double, Float] t) =>
-        Tensor v1 t -- ^ __features__
-        -> Tensor Value t -- ^ __activations__
-relu features | eqLengthGuard [] =
-    buildOp (opDef "Relu"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "T" }
-output_arg { name: "activations" type_attr: "T" }
--}
-
--- | Interleave the values from the `data` tensors into a single tensor.
---
--- Builds a merged tensor such that
--- 
--- ```python
---     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
--- ```
--- 
--- For example, if each `indices[m]` is scalar or vector, we have
--- 
--- ```python
---     # Scalar indices:
---     merged[indices[m], ...] = data[m][...]
--- 
---     # Vector indices:
---     merged[indices[m][i], ...] = data[m][i, ...]
--- ```
--- 
--- Each `data[i].shape` must start with the corresponding `indices[i].shape`,
--- and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
--- must have `data[i].shape = indices[i].shape + constant`.  In terms of this
--- `constant`, the output shape is
--- 
---     merged.shape = [max(indices)] + constant
--- 
--- Values are merged in order, so if an index appears in both `indices[m][i]` and
--- `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
--- merged result.
--- 
--- For example:
--- 
--- ```python
---     indices[0] = 6
---     indices[1] = [4, 1]
---     indices[2] = [[5, 2], [0, 3]]
---     data[0] = [61, 62]
---     data[1] = [[41, 42], [11, 12]]
---     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
---     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
---               [51, 52], [61, 62]]
--- ```
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/DynamicStitch.png" alt>
--- </div>
-dynamicStitch :: forall v1 v2 t . (TensorType t) =>
-                 [Tensor v1 Data.Int.Int32] -- ^ __indices__
-                 -> [Tensor v2 t] -- ^ __data__
-                 -> Tensor Value t -- ^ __merged__
-dynamicStitch indices data' | eqLengthGuard [("N", [("indices", length indices),
-                                                    ("data", length data')])] =
-    buildOp (opDef "DynamicStitch"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        indices data'
-  where
-    n = fromIntegral (length indices) :: Int64
-{-
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-attr { name: "T" type: "type" }
-input_arg { name: "indices" number_attr: "N" type: DT_INT32 }
-input_arg { name: "data" number_attr: "N" type_attr: "T" }
-output_arg { name: "merged" type_attr: "T" }
--}
-
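--- A minimal sketch of the scalar-indices case above (`constant` and `Shape`
--- are assumptions from TensorFlow.Ops / TensorFlow.Types): two scalar pieces
--- stitched into a two-element result.
---
--- ```
--- stitched :: Tensor Value Float
--- stitched = dynamicStitch indices pieces
---   where
---     indices = [ constant (Shape []) [1 :: Data.Int.Int32]
---               , constant (Shape []) [0 :: Data.Int.Int32] ]
---     pieces  = [ constant (Shape []) [10 :: Float]
---               , constant (Shape []) [20 :: Float] ]
---     -- merged = [20, 10]: piece 1 lands at index 0, piece 0 at index 1.
--- ```
-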
--- | Looks up keys in a table, outputs the corresponding values.
---
--- The tensor `keys` must be of the same type as the keys of the table.
--- The output `values` is of the type of the table values.
--- 
--- The scalar `default_value` is the value output for keys not present in the
--- table. It must also be of the same type as the table values.
-lookupTableFind :: forall v2 v3 tin tout . (TensorType tin, TensorType tout) =>
-                   Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
-                   -> Tensor v2 tin -- ^ __keys__: Any shape.  Keys to look up.
-                   -> Tensor v3 tout -- ^ __default_value__
-                   -> Build (Tensor Value tout) -- ^ __values__: Same shape as `keys`.  Values found in the table, or `default_values`
-                   -- for missing keys.
-lookupTableFind table_handle keys default_value | eqLengthGuard [] =
-    buildOp (opDef "LookupTableFind"
-             & opAttr "Tin" .~ tensorType (undefined :: tin)
-             & opAttr "Tout" .~ tensorType (undefined :: tout))
-        table_handle keys default_value
-{-
-attr { name: "Tin" type: "type" }
-attr { name: "Tout" type: "type" }
-input_arg {
-  description: "Handle to the table."
-  is_ref: true
-  name: "table_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Any shape.  Keys to look up."
-  name: "keys"
-  type_attr: "Tin"
-}
-input_arg { name: "default_value" type_attr: "Tout" }
-output_arg {
-  description: "Same shape as `keys`.  Values found in the table, or `default_values`\nfor missing keys."
-  name: "values"
-  type_attr: "Tout"
-}
--}
-
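--- A hedged sketch (`constant` and `Shape` are assumptions from
--- TensorFlow.Ops / TensorFlow.Types): look up two int64 keys, falling back
--- to -1 for keys missing from the table.
---
--- ```
--- lookupTwo :: Tensor Ref Data.ByteString.ByteString
---           -> Build (Tensor Value Data.Int.Int64)
--- lookupTwo table = lookupTableFind table keys defaultValue
---   where
---     keys         = constant (Shape [2]) [3, 99 :: Data.Int.Int64]
---     defaultValue = constant (Shape []) [-1 :: Data.Int.Int64]
--- ```
-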
--- | Generate a single randomly distorted bounding box for an image.
---
--- Bounding box annotations are often supplied in addition to ground-truth labels
--- in image recognition or object localization tasks. A common technique for
--- training such a system is to randomly distort an image while preserving
--- its content, i.e. *data augmentation*. This Op outputs a randomly distorted
--- localization of an object, i.e. bounding box, given an `image_size`,
--- `bounding_boxes` and a series of constraints.
--- 
--- The output of this Op is a single bounding box that may be used to crop the
--- original image. The output is returned as 3 tensors: `begin`, `size` and
--- `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
--- image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
--- what the bounding box looks like.
--- 
--- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
--- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
--- height of the underlying image.
--- 
--- For example,
--- 
--- ```python
---     # Generate a single distorted bounding box.
---     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
---         tf.shape(image),
---         bounding_boxes=bounding_boxes)
--- 
---     # Draw the bounding box in an image summary.
---     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
---                                                   bbox_for_draw)
---     tf.image_summary('images_with_box', image_with_box)
--- 
---     # Employ the bounding box to distort the image.
---     distorted_image = tf.slice(image, begin, size)
--- ```
--- 
--- Note that if no bounding box information is available, setting
--- `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
--- bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
--- false and no bounding boxes are supplied, an error is raised.
-sampleDistortedBoundingBox :: forall v1 v2 t . (TensorType t,
-                                                OneOf '[Data.Int.Int16,
-                                                        Data.Int.Int32,
-                                                        Data.Int.Int64,
-                                                        Data.Int.Int8,
-                                                        Data.Word.Word8] t) =>
-                              Tensor v1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`.
-                              -> Tensor v2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
-                                                 -- associated with the image.
-                              -> Build ((Tensor Value t, Tensor Value t,
-                                         Tensor Value Float))
-                              -- ^ (__begin__, __size__, __bboxes__)
-                              --
-                              -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
-                              -- `tf.slice`.
-                              --
-                              -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
-                              -- `tf.slice`.
-                              --
-                              -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
-                              -- Provide as input to `tf.image.draw_bounding_boxes`.
-sampleDistortedBoundingBox image_size bounding_boxes | eqLengthGuard [] =
-    buildOp (opDef "SampleDistortedBoundingBox"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        image_size bounding_boxes
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`.  Otherwise, it is seeded by a random\nseed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  default_value { f: 0.1 }
-  description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied."
-  name: "min_object_covered"
-  type: "float"
-}
-attr {
-  default_value { list { f: 0.75 f: 1.33 } }
-  description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range."
-  name: "aspect_ratio_range"
-  type: "list(float)"
-}
-attr {
-  default_value { list { f: 5.0e-2 f: 1.0 } }
-  description: "The cropped area of the image must contain a fraction of the\nsupplied image within in this range."
-  name: "area_range"
-  type: "list(float)"
-}
-attr {
-  default_value { i: 100 }
-  description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage."
-  name: "max_attempts"
-  type: "int"
-}
-attr {
-  default_value { b: false }
-  description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error."
-  name: "use_image_if_no_bounding_boxes"
-  type: "bool"
-}
-input_arg {
-  description: "1-D, containing `[height, width, channels]`."
-  name: "image_size"
-  type_attr: "T"
-}
-input_arg {
-  description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image."
-  name: "bounding_boxes"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`."
-  name: "begin"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`."
-  name: "size"
-  type_attr: "T"
-}
-output_arg {
-  description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`."
-  name: "bboxes"
-  type: DT_FLOAT
-}
--}
-
--- | Splits a tensor into `num_split` tensors along one dimension.
-
-splitV :: forall v1 v2 v3 t tlen . (TensorType t, TensorType tlen,
-                                    OneOf '[Data.Int.Int32,
-                                            Data.Int.Int64] tlen) =>
-          Data.Int.Int64 -- ^ __num_split__
-          -> Tensor v1 t -- ^ __value__: The tensor to split.
-          -> Tensor v2 tlen -- ^ __size_splits__: list containing the sizes of each output tensor along the split
-                            -- dimension. Must sum to the dimension of value along split_dim.
-                            -- Can contain one -1 indicating that dimension is to be inferred.
-          -> Tensor v3 Data.Int.Int32 -- ^ __split_dim__: 0-D.  The dimension along which to split.  Must be in the range
-                                      -- `[0, rank(value))`.
-          -> [Tensor Value t] -- ^ __output__: Tensors whose shape matches that of `value`
-          -- except along `split_dim`, where their sizes are
-          -- `size_splits[i]`.
-splitV num_split value size_splits split_dim | eqLengthGuard [] =
-    buildListOp [num_split] (opDef "SplitV"
-                             & opAttr "T" .~ tensorType (undefined :: t)
-                             & opAttr "Tlen" .~ tensorType (undefined :: tlen)
-                             & opAttr "num_split" .~ num_split)
-        value size_splits split_dim
-{-
-attr { has_minimum: true minimum: 1 name: "num_split" type: "int" }
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT64 }
-  name: "Tlen"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to split." name: "value" type_attr: "T"
-}
-input_arg {
-  description: "list containing the sizes of each output tensor along the split\ndimension. Must sum to the dimension of value along split_dim.\nCan contain one -1 indicating that dimension is to be inferred."
-  name: "size_splits"
-  type_attr: "Tlen"
-}
-input_arg {
-  description: "0-D.  The dimension along which to split.  Must be in the range\n`[0, rank(value))`."
-  name: "split_dim"
-  type: DT_INT32
-}
-output_arg {
-  description: "Tensors whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`size_splits[i]`."
-  name: "output"
-  number_attr: "num_split"
-  type_attr: "T"
-}
--}
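-
--- A pure sketch (illustrative names, not part of the bindings) of how a
--- single -1 entry in `size_splits` is resolved: the inferred size is
--- whatever remains of the split dimension once the explicit sizes are
--- accounted for.  Validation (at most one -1, non-negative result) is
--- omitted.
---
--- ```haskell
--- resolveSizeSplits :: Int -> [Int] -> [Int]
--- resolveSizeSplits dim sizes = map fill sizes
---   where
---     fill s = if s == -1 then dim - sum (filter (/= -1) sizes) else s
---
--- -- resolveSizeSplits 10 [2, -1, 3] == [2, 5, 3]
--- ```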
-
--- | Performs a padding as a preprocess during a convolution.
---
--- Similar to FusedResizeAndPadConv2d, this op allows for an optimized
--- implementation where the spatial padding transformation stage is fused with the
--- im2col lookup, but in this case without the bilinear filtering required for
--- resizing. Fusing the padding prevents the need to write out the intermediate
--- results as whole tensors, reducing memory pressure, and we can get some latency
--- gains by merging the transformation calculations.
--- The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
--- order is used instead.
--- Internally this op uses a single per-graph scratch buffer, which means that it
--- will block if multiple versions are being run in parallel. This is because this
--- operator is primarily an optimization to minimize memory usage.
-fusedPadConv2D :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Word.Word16,
-                                                             Double,
-                                                             Float] t) =>
-                  Tensor v1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`.
-                  -> Tensor v2 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of
-                                              -- rows must be the same as the rank of `input`.
-                  -> Tensor v3 t -- ^ __filter__: 4-D with shape
-                                 -- `[filter_height, filter_width, in_channels, out_channels]`.
-                  -> Tensor Value t -- ^ __output__
-fusedPadConv2D input paddings filter | eqLengthGuard [] =
-    buildOp (opDef "FusedPadConv2D"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input paddings filter
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } }
-  name: "mode"
-  type: "string"
-}
-attr {
-  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`."
-  name: "paddings"
-  type: DT_INT32
-}
-input_arg {
-  description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
-  name: "filter"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | For each key, assigns the respective value to the specified component.
---
--- If a key is not found in the barrier, this operation will create a new
--- incomplete element. If a key is found in the barrier, and the element
--- already has a value at component_index, this operation will fail with
--- INVALID_ARGUMENT, and leave the barrier in an undefined state.
-barrierInsertMany :: forall v2 v3 t . (TensorType t) =>
-                     Data.Int.Int64 -- ^ __component_index__: The component of the barrier elements that is being assigned.
-                     -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier.
-                     -> Tensor v2 Data.ByteString.ByteString -- ^ __keys__: A one-dimensional tensor of keys, with length n.
-                     -> Tensor v3 t -- ^ __values__: An any-dimensional tensor of values, which are associated with the
-                                    -- respective keys. The 0th dimension must have length n.
-                     -> Build (ControlNode)
-barrierInsertMany component_index handle keys values | eqLengthGuard [] =
-    buildOp (opDef "BarrierInsertMany"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "component_index" .~ component_index)
-        handle keys values
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "The component of the barrier elements that is being assigned."
-  name: "component_index"
-  type: "int"
-}
-input_arg {
-  description: "The handle to a barrier."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "A one-dimensional tensor of keys, with length n."
-  name: "keys"
-  type: DT_STRING
-}
-input_arg {
-  description: "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n."
-  name: "values"
-  type_attr: "T"
-}
--}
-
--- | Raise an exception to abort the process when called.
---
--- Returns nothing but an exception.
-abort :: ControlNode
-abort  | eqLengthGuard [] =
-    buildOp (opDef "Abort")
-        
-{-
-attr {
-  default_value { s: "" }
-  description: "A string which is the message associated with the exception."
-  name: "error_msg"
-  type: "string"
-}
--}
-
--- | Performs max pooling on the input and outputs both max values and indices.
---
--- The indices in `argmax` are flattened, so that a maximum value at position
--- `[b, y, x, c]` becomes flattened index
--- `((b * height + y) * width + x) * channels + c`.
-maxPoolWithArgmax :: forall v1 targmax t . (TensorType targmax,
-                                            OneOf '[Data.Int.Int32,
-                                                    Data.Int.Int64] targmax,
-                                            TensorType t,
-                                            OneOf '[Data.Word.Word16,
-                                                    Float] t) =>
-                     Tensor v1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
-                     -> (Tensor Value t, Tensor Value targmax)
-                     -- ^ (__output__, __argmax__)
-                     --
-                     -- * __output__: The max pooled output tensor.
-                     --
-                     -- * __argmax__: 4-D.  The flattened indices of the max values chosen for each output.
-maxPoolWithArgmax input | eqLengthGuard [] =
-    buildOp (opDef "MaxPoolWithArgmax"
-             & opAttr "Targmax" .~ tensorType (undefined :: targmax)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  description: "The size of the window for each dimension of the input tensor."
-  has_minimum: true
-  minimum: 4
-  name: "ksize"
-  type: "list(int)"
-}
-attr {
-  description: "The stride of the sliding window for each dimension of the\ninput tensor."
-  has_minimum: true
-  minimum: 4
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT64 }
-  name: "Targmax"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`.  Input to pool over."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "The max pooled output tensor."
-  name: "output"
-  type_attr: "T"
-}
-output_arg {
-  description: "4-D.  The flattened indices of the max values chosen for each output."
-  name: "argmax"
-  type_attr: "Targmax"
-}
--}
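-
--- The flattening formula above, written out as a small Haskell helper
--- (illustrative only; not part of the generated bindings):
---
--- ```haskell
--- flattenedArgmax :: Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int
--- flattenedArgmax height width channels b y x c =
---     ((b * height + y) * width + x) * channels + c
--- ```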
-
--- | Creates or finds a child frame, and makes `data` available to the child frame.
---
--- The unique `frame_name` is used by the `Executor` to identify frames. If
--- `is_constant` is true, `output` is a constant in the child frame; otherwise
--- it may be changed in the child frame. At most `parallel_iterations` iterations
--- are run in parallel in the child frame.
-refEnter :: forall t . (TensorType t) =>
-            Tensor Ref t -- ^ __data__: The tensor to be made available to the child frame.
-            -> Build (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
-refEnter data' | eqLengthGuard [] =
-    buildOp (opDef "RefEnter"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data'
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "The name of the child frame."
-  name: "frame_name"
-  type: "string"
-}
-attr {
-  default_value { b: false }
-  description: "If true, the output is constant within the child frame."
-  name: "is_constant"
-  type: "bool"
-}
-attr {
-  default_value { i: 10 }
-  description: "The number of iterations allowed to run in parallel."
-  name: "parallel_iterations"
-  type: "int"
-}
-input_arg {
-  description: "The tensor to be made available to the child frame."
-  is_ref: true
-  name: "data"
-  type_attr: "T"
-}
-output_arg {
-  description: "The same tensor as `data`."
-  is_ref: true
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Dequantize the 'input' tensor into a float Tensor.
---
--- [min_range, max_range] are scalar floats that specify the range for
--- the 'input' data. The 'mode' attribute controls exactly which calculations are
--- used to convert the float values to their quantized equivalents.
--- 
--- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
--- 
--- ```
--- if T == qint8, in[i] += (range(T) + 1)/ 2.0
--- out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
--- ```
--- here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
--- 
--- *MIN_COMBINED Mode Example*
--- 
--- If the input comes from a QuantizedRelu6, the output type is
--- quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
--- 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
--- Dequantize on quint8 will take each value, cast to float, and multiply
--- by 6 / 255.
--- Note that if the quantized type is qint8, the operation will additionally
--- add 128 to each value prior to casting.
--- 
--- If the mode is 'MIN_FIRST', then this approach is used:
--- 
--- ```
--- number_of_steps = 1 << (# of bits in T)
--- range_adjust = number_of_steps / (number_of_steps - 1)
--- range = (range_max - range_min) * range_adjust
--- range_scale = range / number_of_steps
--- const double offset_input = static_cast<double>(input) - lowest_quantized;
--- result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
--- ```
-dequantize :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                         Data.Int.Int32,
-                                                         Data.Word.Word16,
-                                                         Data.Word.Word8] t) =>
-              Tensor v1 t -- ^ __input__
-              -> Tensor v2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input.
-              -> Tensor v3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input.
-              -> Tensor Value Float -- ^ __output__
-dequantize input min_range max_range | eqLengthGuard [] =
-    buildOp (opDef "Dequantize"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input min_range max_range
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } }
-  default_value { s: "MIN_COMBINED" }
-  name: "mode"
-  type: "string"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg {
-  description: "The minimum scalar value possibly produced for the input."
-  name: "min_range"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The maximum scalar value possibly produced for the input."
-  name: "max_range"
-  type: DT_FLOAT
-}
-output_arg { name: "output" type: DT_FLOAT }
--}
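-
--- A scalar sketch of the 'MIN_COMBINED' formula above (illustrative, not
--- part of the bindings).  `rangeT` stands for
--- `numeric_limits<T>::max() - numeric_limits<T>::min()` of the quantized
--- type, and the Bool selects the qint8 offset step.
---
--- ```haskell
--- dequantizeMinCombined :: Bool -> Double -> Double -> Double -> Double
---                       -> Double
--- dequantizeMinCombined isQint8 rangeT minRange maxRange x =
---     minRange + (x' * (maxRange - minRange) / rangeT)
---   where
---     x' = if isQint8 then x + (rangeT + 1) / 2 else x
---
--- -- quint8 from QuantizedRelu6: dequantizeMinCombined False 255 0 6 v
--- -- multiplies v by 6 / 255, as described above.
--- ```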
-
--- | Draw bounding boxes on a batch of images.
---
--- Outputs a copy of `images` but draws on top of the pixels zero or more bounding
--- boxes specified by the locations in `boxes`. The coordinates of each
--- bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
--- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
--- height of the underlying image.
--- 
--- For example, if an image is 100 x 200 pixels and the bounding box is
--- `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the
--- bounding box will be `(10, 40)` to `(50, 180)`.
--- 
--- Parts of the bounding box may fall outside the image.
-drawBoundingBoxes :: forall v1 v2 t . (TensorType t, OneOf '[Data.Word.Word16,
-                                                             Float] t) =>
-                     Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, depth]`. A batch of images.
-                     -> Tensor v2 Float -- ^ __boxes__: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
-                                        -- boxes.
-                     -> Tensor Value t -- ^ __output__: 4-D with the same shape as `images`. The batch of input images with
-                     -- bounding boxes drawn on the images.
-drawBoundingBoxes images boxes | eqLengthGuard [] =
-    buildOp (opDef "DrawBoundingBoxes"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images boxes
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_HALF } }
-  default_value { type: DT_FLOAT }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, depth]`. A batch of images."
-  name: "images"
-  type_attr: "T"
-}
-input_arg {
-  description: "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes."
-  name: "boxes"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images."
-  name: "output"
-  type_attr: "T"
-}
--}
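-
--- The coordinate arithmetic from the 100 x 200 example above, as a pure
--- helper (illustrative names; not part of the bindings):
---
--- ```haskell
--- boxToPixels :: Int -> Int -> (Double, Double, Double, Double)
---             -> ((Int, Int), (Int, Int))
--- boxToPixels height width (yMin, xMin, yMax, xMax) =
---     ( (scale yMin height, scale xMin width)
---     , (scale yMax height, scale xMax width) )
---   where
---     scale f n = round (f * fromIntegral n)
---
--- -- boxToPixels 100 200 (0.1, 0.2, 0.5, 0.9) == ((10, 40), (50, 180))
--- ```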
-
--- | 
-
-tensorArraySplit :: forall v2 v3 v4 t . (TensorType t) =>
-                    Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                    -> Tensor v2 t -- ^ __value__
-                    -> Tensor v3 Data.Int.Int64 -- ^ __lengths__
-                    -> Tensor v4 Float -- ^ __flow_in__
-                    -> Build (Tensor Value Float) -- ^ __flow_out__
-tensorArraySplit handle value lengths flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArraySplit"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle value lengths flow_in
-{-
-attr { name: "T" type: "type" }
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "value" type_attr: "T" }
-input_arg { name: "lengths" type: DT_INT64 }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "flow_out" type: DT_FLOAT }
--}
-
--- | Converts each string in the input Tensor to its hash modulo a number of buckets.
---
--- The hash function is deterministic on the content of the string within the
--- process and will never change. However, it is not suitable for cryptography.
--- This function may be used when CPU time is scarce and inputs are trusted or
--- unimportant. There is a risk of adversaries constructing inputs that all hash
--- to the same bucket. To prevent this problem, use a strong hash function with
--- `tf.string_to_hash_bucket_strong`.
-stringToHashBucketFast :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets.
-                          -> Tensor v1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket.
-                          -> Tensor Value Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`.
-stringToHashBucketFast num_buckets input | eqLengthGuard [] =
-    buildOp (opDef "StringToHashBucketFast"
-             & opAttr "num_buckets" .~ num_buckets)
-        input
-{-
-attr {
-  description: "The number of buckets."
-  has_minimum: true
-  minimum: 1
-  name: "num_buckets"
-  type: "int"
-}
-input_arg {
-  description: "The strings to assign a hash bucket."
-  name: "input"
-  type: DT_STRING
-}
-output_arg {
-  description: "A Tensor of the same shape as the input `string_tensor`."
-  name: "output"
-  type: DT_INT64
-}
--}
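-
--- The bucketing itself is just hash-then-mod.  In this sketch,
--- Data.Hashable's `hash` stands in for TensorFlow's stable string
--- fingerprint purely for illustration; it is NOT the hash this op
--- computes and is not stable across program runs.
---
--- ```haskell
--- import Data.Hashable (hash)
---
--- toHashBucket :: Int -> String -> Int
--- toHashBucket numBuckets s = hash s `mod` numBuckets
--- ```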
-
--- | 
-
-tensorArrayScatter :: forall v2 v3 v4 t . (TensorType t) =>
-                      Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                      -> Tensor v2 Data.Int.Int32 -- ^ __indices__
-                      -> Tensor v3 t -- ^ __value__
-                      -> Tensor v4 Float -- ^ __flow_in__
-                      -> Build (Tensor Value Float) -- ^ __flow_out__
-tensorArrayScatter handle indices value flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayScatter"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle indices value flow_in
-{-
-attr { name: "T" type: "type" }
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "indices" type: DT_INT32 }
-input_arg { name: "value" type_attr: "T" }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "flow_out" type: DT_FLOAT }
--}
-
--- | Returns a one-hot tensor.
---
--- The locations represented by indices in `indices` take value `on_value`,
--- while all other locations take value `off_value`.
--- 
--- If the input `indices` is rank `N`, the output will have rank `N+1`.
--- The new axis is created at dimension `axis` (default: the new axis is
--- appended at the end).
--- 
--- If `indices` is a scalar the output shape will be a vector of length `depth`.
--- 
--- If `indices` is a vector of length `features`, the output shape will be:
--- ```
---   features x depth if axis == -1
---   depth x features if axis == 0
--- ```
--- 
--- If `indices` is a matrix (batch) with shape `[batch, features]`,
--- the output shape will be:
--- ```
---   batch x features x depth if axis == -1
---   batch x depth x features if axis == 1
---   depth x batch x features if axis == 0
--- ```
--- 
--- 
--- Examples
--- =========
--- 
--- Suppose that
--- 
--- ```
---   indices = [0, 2, -1, 1]
---   depth = 3
---   on_value = 5.0
---   off_value = 0.0
---   axis = -1
--- ```
--- 
--- Then output is `[4 x 3]`:
--- 
---     ```output =
---       [5.0 0.0 0.0]  // one_hot(0)
---       [0.0 0.0 5.0]  // one_hot(2)
---       [0.0 0.0 0.0]  // one_hot(-1)
---       [0.0 5.0 0.0]  // one_hot(1)
---     ```
--- 
--- Suppose that
--- 
--- ```
---   indices = [0, 2, -1, 1]
---   depth = 3
---   on_value = 0.0
---   off_value = 3.0
---   axis = 0
--- ```
--- 
--- Then output is `[3 x 4]`:
--- 
---     ```output =
---       [0.0 3.0 3.0 3.0]
---       [3.0 3.0 3.0 0.0]
---       [3.0 3.0 3.0 3.0]
---       [3.0 0.0 3.0 3.0]
---     //  ^                one_hot(0)
---     //      ^            one_hot(2)
---     //          ^        one_hot(-1)
---     //              ^    one_hot(1)
---     ```
--- Suppose that
--- 
--- ```
---   indices = [[0, 2], [1, -1]]
---   depth = 3
---   on_value = 1.0
---   off_value = 0.0
---   axis = -1
--- ```
--- 
--- Then output is `[2 x 2 x 3]`:
--- 
---     ```output =
---       [
---         [1.0, 0.0, 0.0]  // one_hot(0)
---         [0.0, 0.0, 1.0]  // one_hot(2)
---       ][
---         [0.0, 1.0, 0.0]  // one_hot(1)
---         [0.0, 0.0, 0.0]  // one_hot(-1)
---       ]```
-oneHot :: forall v1 v2 v3 v4 t tI . (TensorType t, TensorType tI,
-                                     OneOf '[Data.Int.Int32, Data.Int.Int64,
-                                             Data.Word.Word8] tI) =>
-          Tensor v1 tI -- ^ __indices__: A tensor of indices.
-          -> Tensor v2 Data.Int.Int32 -- ^ __depth__: A scalar defining the depth of the one hot dimension.
-          -> Tensor v3 t -- ^ __on_value__: A scalar defining the value to fill in output when `indices[j] = i`.
-          -> Tensor v4 t -- ^ __off_value__: A scalar defining the value to fill in output when `indices[j] != i`.
-          -> Tensor Value t -- ^ __output__: The one-hot tensor.
-oneHot indices depth on_value off_value | eqLengthGuard [] =
-    buildOp (opDef "OneHot"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "TI" .~ tensorType (undefined :: tI))
-        indices depth on_value off_value
-{-
-attr {
-  default_value { i: -1 }
-  description: "The axis to fill (default: -1, a new inner-most axis)."
-  name: "axis"
-  type: "int"
-}
-attr { name: "T" type: "type" }
-attr {
-  allowed_values {
-    list { type: DT_UINT8 type: DT_INT32 type: DT_INT64 }
-  }
-  default_value { type: DT_INT64 }
-  name: "TI"
-  type: "type"
-}
-input_arg {
-  description: "A tensor of indices." name: "indices" type_attr: "TI"
-}
-input_arg {
-  description: "A scalar defining the depth of the one hot dimension."
-  name: "depth"
-  type: DT_INT32
-}
-input_arg {
-  description: "A scalar defining the value to fill in output when `indices[j] = i`."
-  name: "on_value"
-  type_attr: "T"
-}
-input_arg {
-  description: "A scalar defining the value to fill in output when `indices[j] != i`."
-  name: "off_value"
-  type_attr: "T"
-}
-output_arg {
-  description: "The one-hot tensor." name: "output" type_attr: "T"
-}
--}
-
--- | 
-
-batchIFFT3D :: Tensor v1 (Data.Complex.Complex Float) -- ^ __input__
-               -> Tensor Value (Data.Complex.Complex Float) -- ^ __output__
-batchIFFT3D input | eqLengthGuard [] =
-    buildOp (opDef "BatchIFFT3D")
-        input
-{-
-input_arg { name: "input" type: DT_COMPLEX64 }
-output_arg { name: "output" type: DT_COMPLEX64 }
--}
-
--- | Reinterpret the bytes of a string as a vector of numbers.
-
-decodeRaw :: forall v1 out_type . (TensorType out_type, OneOf '[Data.Int.Int16,
-                                                                Data.Int.Int32,
-                                                                Data.Int.Int64,
-                                                                Data.Int.Int8,
-                                                                Data.Word.Word16,
-                                                                Data.Word.Word8,
-                                                                Double,
-                                                                Float] out_type) =>
-             Tensor v1 Data.ByteString.ByteString -- ^ __bytes__: All the elements must have the same length.
-             -> Tensor Value out_type -- ^ __output__: A Tensor with one more dimension than the input `bytes`.  The
-             -- added dimension will have size equal to the length of the elements
-             -- of `bytes` divided by the number of bytes to represent `out_type`.
-decodeRaw bytes | eqLengthGuard [] =
-    buildOp (opDef "DecodeRaw"
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        bytes
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_INT64
-    }
-  }
-  name: "out_type"
-  type: "type"
-}
-attr {
-  default_value { b: true }
-  description: "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`."
-  name: "little_endian"
-  type: "bool"
-}
-input_arg {
-  description: "All the elements must have the same length."
-  name: "bytes"
-  type: DT_STRING
-}
-output_arg {
-  description: "A Tensor with one more dimension than the input `bytes`.  The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`."
-  name: "output"
-  type_attr: "out_type"
-}
--}
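-
--- The size of the added dimension, computed as described above: element
--- byte-length divided by the byte width of `out_type` (Int32 chosen here
--- for illustration; not part of the bindings).
---
--- ```haskell
--- import qualified Data.ByteString as BS
--- import Data.Int (Int32)
--- import Foreign.Storable (sizeOf)
---
--- decodedLength :: BS.ByteString -> Int
--- decodedLength bytes = BS.length bytes `div` sizeOf (undefined :: Int32)
--- ```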
-
--- | 
-
-tensorArrayPack :: forall v2 dtype . (TensorType dtype) =>
-                   Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                   -> Tensor v2 Float -- ^ __flow_in__
-                   -> Build (Tensor Value dtype) -- ^ __value__
-tensorArrayPack handle flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayPack"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle flow_in
-{-
-attr { name: "dtype" type: "type" }
-attr {
-  default_value { shape { unknown_rank: true } }
-  name: "element_shape"
-  type: "shape"
-}
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "value" type_attr: "dtype" }
--}
-
--- | Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
---
--- accum += grad * grad
--- prox_v = var - lr * grad * (1 / sqrt(accum))
--- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
-applyProximalAdagrad :: forall v3 v4 v5 v6 t . (TensorType t,
-                                                OneOf '[(Data.Complex.Complex Double),
-                                                        (Data.Complex.Complex Float),
-                                                        Data.Int.Int16,
-                                                        Data.Int.Int32,
-                                                        Data.Int.Int64,
-                                                        Data.Int.Int8,
-                                                        Data.Word.Word16,
-                                                        Data.Word.Word8, Double,
-                                                        Float] t) =>
-                        Tensor Ref t -- ^ __var__: Should be from a Variable().
-                        -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-                        -> Tensor v3 t -- ^ __lr__: Scaling factor. Must be a scalar.
-                        -> Tensor v4 t -- ^ __l1__: L1 regularization. Must be a scalar.
-                        -> Tensor v5 t -- ^ __l2__: L2 regularization. Must be a scalar.
-                        -> Tensor v6 t -- ^ __grad__: The gradient.
-                        -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyProximalAdagrad var accum lr l1 l2 grad | eqLengthGuard [] =
-    buildOp (opDef "ApplyProximalAdagrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var accum lr l1 l2 grad
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regularization. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regularization. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
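-
--- One scalar step of the update equations above (illustrative sketch,
--- not part of the bindings; all operands are scalars here):
---
--- ```haskell
--- proximalAdagradStep :: Double -> Double -> Double -> Double -> Double
---                     -> Double -> (Double, Double)
--- proximalAdagradStep var accum lr l1 l2 grad = (var', accum')
---   where
---     accum' = accum + grad * grad
---     proxV  = var - lr * grad * (1 / sqrt accum')
---     var'   = signum proxV / (1 + lr * l2) * max (abs proxV - lr * l1) 0
--- ```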
-
--- | Applies a sparse gradient to a given accumulator. Does not add if local_step is
---
--- less than the accumulator's global_step.
-sparseAccumulatorApplyGradient :: forall v2 v3 v4 v5 dtype . (TensorType dtype,
-                                                              OneOf '[(Data.Complex.Complex Double),
-                                                                      (Data.Complex.Complex Float),
-                                                                      Data.Int.Int16,
-                                                                      Data.Int.Int32,
-                                                                      Data.Int.Int64,
-                                                                      Data.Int.Int8,
-                                                                      Data.Word.Word16,
-                                                                      Data.Word.Word8,
-                                                                      Double,
-                                                                      Float] dtype) =>
-                                  Bool -- ^ __has_known_shape__: Boolean indicating whether gradient_shape is unknown, in which
-                                       -- case the input is ignored during validation.
-                                  -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
-                                  -> Tensor v2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the sparse gradient was computed.
-                                  -> Tensor v3 Data.Int.Int64 -- ^ __gradient_indices__: Indices of the sparse gradient to be accumulated. Must be a
-                                                              -- vector.
-                                  -> Tensor v4 dtype -- ^ __gradient_values__: Values are the non-zero slices of the gradient, and must have
-                                                     -- the same first dimension as indices, i.e., the nnz represented by indices and
-                                                     -- values must be consistent.
-                                  -> Tensor v5 Data.Int.Int64 -- ^ __gradient_shape__: Shape of the sparse gradient to be accumulated.
-                                  -> Build (ControlNode)
-sparseAccumulatorApplyGradient has_known_shape handle local_step
-                               gradient_indices gradient_values
-                               gradient_shape | eqLengthGuard [] =
-    buildOp (opDef "SparseAccumulatorApplyGradient"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "has_known_shape" .~ has_known_shape)
-        handle local_step gradient_indices gradient_values gradient_shape
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  description: "Boolean indicating whether gradient_shape is unknown, in which\ncase the input is ignored during validation."
-  name: "has_known_shape"
-  type: "bool"
-}
-input_arg {
-  description: "The handle to a accumulator."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The local_step value at which the sparse gradient was computed."
-  name: "local_step"
-  type: DT_INT64
-}
-input_arg {
-  description: "Indices of the sparse gradient to be accumulated. Must be a\nvector."
-  name: "gradient_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "Values are the non-zero slices of the gradient, and must have\nthe same first dimension as indices, i.e., the nnz represented by indices and\nvalues must be consistent."
-  name: "gradient_values"
-  type_attr: "dtype"
-}
-input_arg {
-  description: "Shape of the sparse gradient to be accumulated."
-  name: "gradient_shape"
-  type: DT_INT64
-}
--}
-
--- | Returns x + y element-wise.
---
--- *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-add :: forall v1 v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.ByteString.ByteString,
-                                               Data.Int.Int16, Data.Int.Int32,
-                                               Data.Int.Int64, Data.Int.Int8,
-                                               Data.Word.Word16,
-                                               Data.Word.Word8, Double,
-                                               Float] t) =>
-       Tensor v1 t -- ^ __x__
-       -> Tensor v2 t -- ^ __y__
-       -> Tensor Value t -- ^ __z__
-add x y | eqLengthGuard [] =
-    buildOp (opDef "Add"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_STRING
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
-
--- | Computes softsign: `features / (abs(features) + 1)`.
-
-softsign :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t) =>
-            Tensor v1 t -- ^ __features__
-            -> Tensor Value t -- ^ __activations__
-softsign features | eqLengthGuard [] =
-    buildOp (opDef "Softsign"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "T" }
-output_arg { name: "activations" type_attr: "T" }
--}
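-
--- The same nonlinearity on a scalar (illustrative only):
---
--- ```haskell
--- softsignScalar :: Double -> Double
--- softsignScalar x = x / (abs x + 1)
--- ```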
-
--- | 
-
-tensorArrayRead :: forall v2 v3 dtype . (TensorType dtype) =>
-                   Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                   -> Tensor v2 Data.Int.Int32 -- ^ __index__
-                   -> Tensor v3 Float -- ^ __flow_in__
-                   -> Build (Tensor Value dtype) -- ^ __value__
-tensorArrayRead handle index flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayRead"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle index flow_in
-{-
-attr { name: "dtype" type: "type" }
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "index" type: DT_INT32 }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "value" type_attr: "dtype" }
--}
-
--- | Applies sparse subtraction between `updates` and individual values or slices
---
--- within a given variable according to `indices`.
--- 
--- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
--- 
--- `indices` must be integer tensor, containing indices into `ref`.
--- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
--- 
--- The innermost dimension of `indices` (with length `K`) corresponds to
--- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
--- dimension of `ref`.
--- 
--- `updates` is `Tensor` of rank `Q-1+P-K` with shape:
--- 
--- ```
--- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
--- ```
--- 
--- For example, say we want to subtract 4 scattered elements from a rank-1 tensor
--- with 8 elements. In Python, that subtraction would look like this:
--- 
---     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
---     indices = tf.constant([[4], [3], [1], [7]])
---     updates = tf.constant([9, 10, 11, 12])
---     sub = tf.scatter_nd_sub(ref, indices, updates)
---     with tf.Session() as sess:
---       print sess.run(sub)
--- 
--- The resulting update to ref would look like this:
--- 
---     [1, -9, 3, -6, -4, 6, 7, -4]
--- 
--- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
--- slices.
-scatterNdSub :: forall v2 v3 t tindices . (TensorType t,
-                                           OneOf '[(Data.Complex.Complex Double),
-                                                   (Data.Complex.Complex Float),
-                                                   Data.Int.Int16,
-                                                   Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Int.Int8,
-                                                   Data.Word.Word16,
-                                                   Data.Word.Word8, Double,
-                                                   Float] t,
-                                           TensorType tindices,
-                                           OneOf '[Data.Int.Int32,
-                                                   Data.Int.Int64] tindices) =>
-                Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
-                -> Tensor v2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
-                                      -- A tensor of indices into ref.
-                -> Tensor v3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
-                               -- to subtract from ref.
-                -> Build (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want
-                -- to use the updated values after the update is done.
-scatterNdSub ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterNdSub"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "A mutable Tensor. Should be from a Variable node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto subtract from ref."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
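-
--- A pure sketch of the rank-1 example above (illustrative names, not
--- part of the bindings): each update is subtracted at its scattered
--- index.
---
--- ```haskell
--- scatterSub1D :: [Int] -> [(Int, Int)] -> [Int]
--- scatterSub1D = foldl step
---   where
---     step acc (i, u) = [ if j == i then v - u else v
---                       | (j, v) <- zip [0 ..] acc ]
---
--- -- scatterSub1D [1 .. 8] (zip [4, 3, 1, 7] [9, 10, 11, 12])
--- -- == [1, -9, 3, -6, -4, 6, 7, -4]
--- ```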
-
--- | Restores a tensor from checkpoint files.
---
--- This is like `Restore` except that the restored tensor can be listed as filling
--- only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
--- larger tensor and the slice that the restored tensor covers.
--- 
--- The `shape_and_slice` input has the same format as the
--- elements of the `shapes_and_slices` input of the `SaveSlices` op.
-restoreSlice :: forall v1 v2 v3 dt . (TensorType dt) =>
-                Tensor v1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from
-                                                     -- which we read the tensor.
-                -> Tensor v2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be
-                                                        -- restored.
-                -> Tensor v3 Data.ByteString.ByteString -- ^ __shape_and_slice__: Scalar. The shapes and slice specifications to use when
-                                                        -- restoring a tensor.
-                -> Tensor Value dt -- ^ __tensor__: The restored tensor.
-restoreSlice file_pattern tensor_name shape_and_slice | eqLengthGuard [] =
-    buildOp (opDef "RestoreSlice"
-             & opAttr "dt" .~ tensorType (undefined :: dt))
-        file_pattern tensor_name shape_and_slice
-{-
-attr {
-  description: "The type of the tensor to be restored."
-  name: "dt"
-  type: "type"
-}
-attr {
-  default_value { i: -1 }
-  description: "Index of file to open first if multiple files match\n`file_pattern`. See the documentation for `Restore`."
-  name: "preferred_shard"
-  type: "int"
-}
-input_arg {
-  description: "Must have a single element. The pattern of the files from\nwhich we read the tensor."
-  name: "file_pattern"
-  type: DT_STRING
-}
-input_arg {
-  description: "Must have a single element. The name of the tensor to be\nrestored."
-  name: "tensor_name"
-  type: DT_STRING
-}
-input_arg {
-  description: "Scalar. The shapes and slice specifications to use when\nrestoring a tensors."
-  name: "shape_and_slice"
-  type: DT_STRING
-}
-output_arg {
-  description: "The restored tensor." name: "tensor" type_attr: "dt"
-}
--}
-
--- | Update 'ref' by adding 'value' to it.
---
--- This operation outputs "ref" after the update is done.
--- This makes it easier to chain operations that need to use the updated value.
-assignAdd :: forall v2 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                                  (Data.Complex.Complex Float),
-                                                  Data.Int.Int16,
-                                                  Data.Int.Int32,
-                                                  Data.Int.Int64, Data.Int.Int8,
-                                                  Data.Word.Word16,
-                                                  Data.Word.Word8, Double,
-                                                  Float] t) =>
-             Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
-             -> Tensor v2 t -- ^ __value__: The value to be added to the variable.
-             -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as "ref".  Returned as a convenience for operations that want
-             -- to use the new value after the variable has been updated.
-assignAdd ref value | eqLengthGuard [] =
-    buildOp (opDef "AssignAdd"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        ref value
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "The value to be added to the variable."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
--- | Returns the truth value of (x > y) element-wise.
---
--- *NOTE*: `Greater` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-greater :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                   Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Int.Int8,
-                                                   Data.Word.Word16,
-                                                   Data.Word.Word8, Double,
-                                                   Float] t) =>
-           Tensor v1 t -- ^ __x__
-           -> Tensor v2 t -- ^ __y__
-           -> Tensor Value Bool -- ^ __z__
-greater x y | eqLengthGuard [] =
-    buildOp (opDef "Greater"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type: DT_BOOL }
--}
-
--- | Returns the number of work units this Reader has finished processing.
-
-readerNumWorkUnitsCompleted :: Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
-                               -> Build (Tensor Value Data.Int.Int64) -- ^ __units_completed__
-readerNumWorkUnitsCompleted reader_handle | eqLengthGuard [] =
-    buildOp (opDef "ReaderNumWorkUnitsCompleted")
-        reader_handle
-{-
-input_arg {
-  description: "Handle to a Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
-output_arg { name: "units_completed" type: DT_INT64 }
--}
-
--- | Gather specific elements from the TensorArray into output `value`.
---
--- All elements selected by `indices` must have the same shape.
-tensorArrayGatherV2 :: forall v1 v2 v3 dtype . (TensorType dtype) =>
-                       Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
-                       -> Tensor v2 Data.Int.Int32 -- ^ __indices__: The locations in the TensorArray from which to read tensor elements.
-                       -> Tensor v3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                       -> Tensor Value dtype -- ^ __value__: All of the elements in the TensorArray, concatenated along a new
-                       -- axis (the new dimension 0).
-tensorArrayGatherV2 handle indices flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayGatherV2"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle indices flow_in
-{-
-attr {
-  description: "The type of the elem that is returned."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  default_value { shape { unknown_rank: true } }
-  description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error."
-  name: "element_shape"
-  type: "shape"
-}
-input_arg {
-  description: "The handle to a TensorArray."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The locations in the TensorArray from which to read tensor elements."
-  name: "indices"
-  type: DT_INT32
-}
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0)."
-  name: "value"
-  type_attr: "dtype"
-}
--}
-
--- | Read an element from the TensorArray into output `value`.
-
-tensorArrayReadV2 :: forall v1 v2 v3 dtype . (TensorType dtype) =>
-                     Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
-                     -> Tensor v2 Data.Int.Int32 -- ^ __index__
-                     -> Tensor v3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                     -> Tensor Value dtype -- ^ __value__: The tensor that is read from the TensorArray.
-tensorArrayReadV2 handle index flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayReadV2"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle index flow_in
-{-
-attr {
-  description: "The type of the elem that is returned."
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "The handle to a TensorArray."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg { name: "index" type: DT_INT32 }
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The tensor that is read from the TensorArray."
-  name: "value"
-  type_attr: "dtype"
-}
--}
-
--- | Decode web-safe base64-encoded strings.
---
--- Input may or may not have padding at the end. See EncodeBase64 for padding.
--- Web-safe means that input must use - and _ instead of + and /.
-decodeBase64 :: Tensor v1 Data.ByteString.ByteString -- ^ __input__: Base64 strings to decode.
-                -> Tensor Value Data.ByteString.ByteString -- ^ __output__: Decoded strings.
-decodeBase64 input | eqLengthGuard [] =
-    buildOp (opDef "DecodeBase64")
-        input
-{-
-input_arg {
-  description: "Base64 strings to decode."
-  name: "input"
-  type: DT_STRING
-}
-output_arg {
-  description: "Decoded strings." name: "output" type: DT_STRING
-}
--}
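-
--- A hypothetical usage sketch for 'decodeBase64'; it assumes `vector` from
--- TensorFlow.Ops and `run`/`runSession` from TensorFlow.Core, which are not
--- part of this generated module:
---
--- > import qualified Data.ByteString.Char8 as B8
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > decodeExample :: IO (V.Vector B8.ByteString)
--- > decodeExample = TF.runSession $
--- >     TF.run $ decodeBase64 (TF.vector [B8.pack "aGVsbG8"])
--- > -- "aGVsbG8" is the web-safe, unpadded encoding of "hello".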
-
--- | Push an element onto the tensor_array.
-
-tensorArrayWriteV2 :: forall v1 v2 v3 v4 t . (TensorType t) =>
-                      Tensor v1 Data.ByteString.ByteString -- ^ __handle__: The handle to a TensorArray.
-                      -> Tensor v2 Data.Int.Int32 -- ^ __index__: The position to write to inside the TensorArray.
-                      -> Tensor v3 t -- ^ __value__: The tensor to write to the TensorArray.
-                      -> Tensor v4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-                      -> Tensor Value Float -- ^ __flow_out__: A float scalar that enforces proper chaining of operations.
-tensorArrayWriteV2 handle index value flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayWriteV2"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle index value flow_in
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The handle to a TensorArray."
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The position to write to inside the TensorArray."
-  name: "index"
-  type: DT_INT32
-}
-input_arg {
-  description: "The tensor to write to the TensorArray."
-  name: "value"
-  type_attr: "T"
-}
-input_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_in"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A float scalar that enforces proper chaining of operations."
-  name: "flow_out"
-  type: DT_FLOAT
-}
--}
-
--- | Outputs a `Summary` protocol buffer with audio.
---
--- The summary has up to `max_outputs` summary values containing audio. The
--- audio is built from `tensor` which must be 3-D with shape `[batch_size,
--- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
--- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
--- 
--- The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
--- build the `tag` of the summary values:
--- 
--- *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
--- *  If `max_outputs` is greater than 1, the summary value tags are
---    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
-audioSummary :: Float -- ^ __sample_rate__: The sample rate of the signal in hertz.
-                -> Tensor v1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values.
-                -> Tensor v2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`.
-                -> Tensor Value Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer.
-audioSummary sample_rate tag tensor | eqLengthGuard [] =
-    buildOp (opDef "AudioSummary"
-             & opAttr "sample_rate" .~ sample_rate)
-        tag tensor
-{-
-attr {
-  description: "The sample rate of the signal in hertz."
-  name: "sample_rate"
-  type: "float"
-}
-attr {
-  default_value { i: 3 }
-  description: "Max number of batch elements to generate audio for."
-  has_minimum: true
-  minimum: 1
-  name: "max_outputs"
-  type: "int"
-}
-input_arg {
-  description: "Scalar. Used to build the `tag` attribute of the summary values."
-  name: "tag"
-  type: DT_STRING
-}
-input_arg {
-  description: "2-D of shape `[batch_size, frames]`."
-  name: "tensor"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Scalar. Serialized `Summary` protocol buffer."
-  name: "summary"
-  type: DT_STRING
-}
--}
-
--- | Returns which elements of x are finite.
---
--- @compatibility(numpy)
--- Equivalent to np.isfinite
--- @end_compatibility
-isFinite :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16, Double,
-                                                 Float] t) =>
-            Tensor v1 t -- ^ __x__
-            -> Tensor Value Bool -- ^ __y__
-isFinite x | eqLengthGuard [] =
-    buildOp (opDef "IsFinite"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type: DT_BOOL }
--}
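-
--- A minimal sketch of 'isFinite', assuming `vector`/`run`/`runSession` from
--- TensorFlow.Ops and TensorFlow.Core, and that a `Bool` vector is fetchable:
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > finiteExample :: IO (V.Vector Bool)
--- > finiteExample = TF.runSession $
--- >     TF.run $ isFinite (TF.vector [1, 1 / 0, 0 / 0 :: Float])
--- > -- expected: [True, False, False] (infinity and NaN are not finite)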
-
--- | 
-
-tensorArrayConcat :: forall v2 dtype . (TensorType dtype) =>
-                     Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                     -> Tensor v2 Float -- ^ __flow_in__
-                     -> Build ((Tensor Value dtype,
-                                Tensor Value Data.Int.Int64))
-                     -- ^ (__value__, __lengths__)
-                     --
-                     -- * __value__
-                     --
-                     -- * __lengths__
-tensorArrayConcat handle flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArrayConcat"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle flow_in
-{-
-attr { name: "dtype" type: "type" }
-attr {
-  default_value { shape { unknown_rank: true } }
-  name: "element_shape_except0"
-  type: "shape"
-}
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "value" type_attr: "dtype" }
-output_arg { name: "lengths" type: DT_INT64 }
--}
-
--- | Computes the sum of elements across dimensions of a SparseTensor.
---
--- This Op takes a SparseTensor and is the sparse counterpart to
--- `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
--- instead of a sparse one.
--- 
--- Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
--- with length 1.
--- 
--- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
--- with a single element is returned.  Additionally, the axes can be negative,
--- which are interpreted according to the indexing rules in Python.
-sparseReduceSum :: forall v1 v2 v3 v4 t . (TensorType t,
-                                           OneOf '[(Data.Complex.Complex Double),
-                                                   (Data.Complex.Complex Float),
-                                                   Data.Int.Int16,
-                                                   Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Int.Int8,
-                                                   Data.Word.Word16,
-                                                   Data.Word.Word8, Double,
-                                                   Float] t) =>
-                   Tensor v1 Data.Int.Int64 -- ^ __input_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                            -- SparseTensor, possibly not in canonical ordering.
-                   -> Tensor v2 t -- ^ __input_values__: 1-D.  `N` non-empty values corresponding to `input_indices`.
-                   -> Tensor v3 Data.Int.Int64 -- ^ __input_shape__: 1-D.  Shape of the input SparseTensor.
-                   -> Tensor v4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D.  Length-`K` vector containing the reduction axes.
-                   -> Tensor Value t -- ^ __output__: `R-K`-D.  The reduced Tensor.
-sparseReduceSum input_indices input_values input_shape
-                reduction_axes | eqLengthGuard [] =
-    buildOp (opDef "SparseReduceSum"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input_indices input_values input_shape reduction_axes
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
-  name: "input_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `input_indices`."
-  name: "input_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "input_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  Length-`K` vector containing the reduction axes."
-  name: "reduction_axes"
-  type: DT_INT32
-}
-output_arg {
-  description: "`R-K`-D.  The reduced Tensor."
-  name: "output"
-  type_attr: "T"
-}
--}
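-
--- A sketch of reducing a 2x2 SparseTensor along axis 1 (hypothetical; the
--- `constant`/`vector` helpers are assumed from TensorFlow.Ops):
---
--- > import Data.Int (Int32, Int64)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > -- Sparse entries (0,0) = 1 and (1,1) = 2 in a [2, 2] tensor.
--- > sparseSumExample :: IO (V.Vector Float)
--- > sparseSumExample = TF.runSession $ TF.run $
--- >     sparseReduceSum
--- >         (TF.constant (TF.Shape [2, 2]) [0, 0, 1, 1 :: Int64])  -- indices
--- >         (TF.vector [1, 2 :: Float])                            -- values
--- >         (TF.vector [2, 2 :: Int64])                            -- dense shape
--- >         (TF.vector [1 :: Int32])                               -- reduction axes
--- > -- expected dense output: [1.0, 2.0]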
-
--- | Returns x / y element-wise for real types.
---
--- If `x` and `y` are reals, this will return the floating-point division.
--- 
--- *NOTE*: `Div` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-realDiv :: forall v1 v2 t . (TensorType t,
-                             OneOf '[(Data.Complex.Complex Double),
-                                     (Data.Complex.Complex Float),
-                                     Data.Int.Int16, Data.Int.Int32,
-                                     Data.Int.Int64, Data.Int.Int8,
-                                     Data.Word.Word16, Data.Word.Word8, Double,
-                                     Float] t) => Tensor v1 t -- ^ __x__
-           -> Tensor v2 t -- ^ __y__
-           -> Tensor Value t -- ^ __z__
-realDiv x y | eqLengthGuard [] =
-    buildOp (opDef "RealDiv"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
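-
--- A minimal sketch for 'realDiv' (assumes `vector`, `run` and `runSession`
--- from TensorFlow.Ops / TensorFlow.Core):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > divExample :: IO (V.Vector Float)
--- > divExample = TF.runSession $
--- >     TF.run $ realDiv (TF.vector [1, 3 :: Float]) (TF.vector [2, 4])
--- > -- expected: [0.5, 0.75]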
-
--- | 
-
-tensorArraySize :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__
-                   -> Tensor v2 Float -- ^ __flow_in__
-                   -> Build (Tensor Value Data.Int.Int32) -- ^ __size__
-tensorArraySize handle flow_in | eqLengthGuard [] =
-    buildOp (opDef "TensorArraySize")
-        handle flow_in
-{-
-input_arg { is_ref: true name: "handle" type: DT_STRING }
-input_arg { name: "flow_in" type: DT_FLOAT }
-output_arg { name: "size" type: DT_INT32 }
--}
-
--- | Adds `bias` to `value`.
---
--- This is a deprecated version of BiasAdd and will soon be removed.
--- 
--- This is a special case of `tf.add` where `bias` is restricted to be 1-D.
--- Broadcasting is supported, so `value` may have any number of dimensions.
-biasAddV1 :: forall v1 v2 t . (TensorType t,
-                               OneOf '[(Data.Complex.Complex Double),
-                                       (Data.Complex.Complex Float),
-                                       Data.Int.Int16, Data.Int.Int32,
-                                       Data.Int.Int64, Data.Int.Int8,
-                                       Data.Word.Word16, Data.Word.Word8,
-                                       Double, Float] t) =>
-             Tensor v1 t -- ^ __value__: Any number of dimensions.
-             -> Tensor v2 t -- ^ __bias__: 1-D with size the last dimension of `value`.
-             -> Tensor Value t -- ^ __output__: Broadcasted sum of `value` and `bias`.
-biasAddV1 value bias | eqLengthGuard [] =
-    buildOp (opDef "BiasAddV1"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        value bias
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Any number of dimensions."
-  name: "value"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D with size the last dimension of `value`."
-  name: "bias"
-  type_attr: "T"
-}
-output_arg {
-  description: "Broadcasted sum of `value` and `bias`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Returns the truth value of x OR y element-wise.
---
--- *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-logicalOr :: Tensor v1 Bool -- ^ __x__
-             -> Tensor v2 Bool -- ^ __y__
-             -> Tensor Value Bool -- ^ __z__
-logicalOr x y | eqLengthGuard [] =
-    buildOp (opDef "LogicalOr")
-        x y
-{-
-input_arg { name: "x" type: DT_BOOL }
-input_arg { name: "y" type: DT_BOOL }
-output_arg { name: "z" type: DT_BOOL }
--}
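-
--- 'logicalOr' needs no type attribute, so a sketch is particularly short
--- (helpers again assumed from TensorFlow.Ops / TensorFlow.Core):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > orExample :: IO (V.Vector Bool)
--- > orExample = TF.runSession $
--- >     TF.run $ logicalOr (TF.vector [True, False]) (TF.vector [False, False])
--- > -- expected: [True, False]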
-
--- | Push an element onto the stack.
-
-stackPush :: forall v2 t . (TensorType t) =>
-             Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack.
-             -> Tensor v2 t -- ^ __elem__: The tensor to be pushed onto the stack.
-             -> Build (Tensor Value t) -- ^ __output__: The same tensor as the input 'elem'.
-stackPush handle elem | eqLengthGuard [] =
-    buildOp (opDef "StackPush"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        handle elem
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { b: false }
-  description: "Swap `elem` to CPU. Default to false."
-  name: "swap_memory"
-  type: "bool"
-}
-input_arg {
-  description: "The handle to a stack."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "The tensor to be pushed onto the stack."
-  name: "elem"
-  type_attr: "T"
-}
-output_arg {
-  description: "The same tensor as the input \'elem\'."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes Quantized Rectified Linear: `max(features, 0)`
-
-quantizedRelu :: forall v1 v2 v3 tinput out_type . (TensorType tinput,
-                                                    OneOf '[Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8] tinput,
-                                                    TensorType out_type,
-                                                    OneOf '[Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8] out_type) =>
-                 Tensor v1 tinput -- ^ __features__
-                 -> Tensor v2 Float -- ^ __min_features__: The float value that the lowest quantized value represents.
-                 -> Tensor v3 Float -- ^ __max_features__: The float value that the highest quantized value represents.
-                 -> (Tensor Value out_type, Tensor Value Float,
-                     Tensor Value Float)
-                 -- ^ (__activations__, __min_activations__, __max_activations__)
-                 --
-                 -- * __activations__: Has the same output shape as "features".
-                 --
-                 -- * __min_activations__: The float value that the lowest quantized value represents.
-                 --
-                 -- * __max_activations__: The float value that the highest quantized value represents.
-quantizedRelu features min_features max_features | eqLengthGuard [] =
-    buildOp (opDef "QuantizedRelu"
-             & opAttr "Tinput" .~ tensorType (undefined :: tinput)
-             & opAttr "out_type" .~ tensorType (undefined :: out_type))
-        features min_features max_features
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  name: "Tinput"
-  type: "type"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT16
-      type: DT_QUINT16
-      type: DT_QINT32
-    }
-  }
-  default_value { type: DT_QUINT8 }
-  name: "out_type"
-  type: "type"
-}
-input_arg { name: "features" type_attr: "Tinput" }
-input_arg {
-  description: "The float value that the lowest quantized value represents."
-  name: "min_features"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The float value that the highest quantized value represents."
-  name: "max_features"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "Has the same output shape as \"features\"."
-  name: "activations"
-  type_attr: "out_type"
-}
-output_arg {
-  description: "The float value that the lowest quantized value represents."
-  name: "min_activations"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The float value that the highest quantized value represents."
-  name: "max_activations"
-  type: DT_FLOAT
-}
--}
-
--- | Return the reduction indices for computing gradients of s0 op s1 with broadcast.
---
--- This is typically used by gradient computations for a broadcasting operation.
-broadcastGradientArgs :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                                 Data.Int.Int64] t) =>
-                         Tensor v1 t -- ^ __s0__
-                         -> Tensor v2 t -- ^ __s1__
-                         -> (Tensor Value t, Tensor Value t)
-                         -- ^ (__r0__, __r1__)
-                         --
-                         -- * __r0__
-                         --
-                         -- * __r1__
-broadcastGradientArgs s0 s1 | eqLengthGuard [] =
-    buildOp (opDef "BroadcastGradientArgs"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        s0 s1
-{-
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "s0" type_attr: "T" }
-input_arg { name: "s1" type_attr: "T" }
-output_arg { name: "r0" type_attr: "T" }
-output_arg { name: "r1" type_attr: "T" }
--}
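-
--- A sketch of 'broadcastGradientArgs' on the shapes [2, 3, 1] and [3, 5]
--- (hypothetical; it also assumes that a pair of tensors is fetchable
--- together):
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > bcastExample :: IO (V.Vector Int32, V.Vector Int32)
--- > bcastExample = TF.runSession $
--- >     TF.run $ broadcastGradientArgs (TF.vector [2, 3, 1 :: Int32])
--- >                                    (TF.vector [3, 5])
--- > -- expected: r0 = [2] and r1 = [0], the axes each input was broadcast along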
-
--- | Finds unique elements in a 1-D tensor.
---
--- This operation returns a tensor `y` containing all of the unique elements of `x`
--- sorted in the same order that they occur in `x`. This operation also returns a
--- tensor `idx` the same size as `x` that contains the index of each value of `x`
--- in the unique output `y`. Finally, it returns a third tensor `count` that
--- contains the count of each element of `y` in `x`. In other words:
--- 
--- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
--- 
--- For example:
--- 
--- ```prettyprint
--- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
--- y, idx, count = unique_with_counts(x)
--- y ==> [1, 2, 4, 7, 8]
--- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
--- count ==> [2, 1, 3, 1, 2]
--- ```
-uniqueWithCounts :: forall v1 t out_idx . (TensorType t, TensorType out_idx,
-                                           OneOf '[Data.Int.Int32,
-                                                   Data.Int.Int64] out_idx) =>
-                    Tensor v1 t -- ^ __x__: 1-D.
-                    -> (Tensor Value t, Tensor Value out_idx,
-                        Tensor Value out_idx) -- ^ (__y__, __idx__, __count__)
-                    --
-                    -- * __y__: 1-D.
-                    --
-                    -- * __idx__: 1-D.
-                    --
-                    -- * __count__: 1-D.
-uniqueWithCounts x | eqLengthGuard [] =
-    buildOp (opDef "UniqueWithCounts"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "out_idx" .~ tensorType (undefined :: out_idx))
-        x
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "out_idx"
-  type: "type"
-}
-input_arg { description: "1-D." name: "x" type_attr: "T" }
-output_arg { description: "1-D." name: "y" type_attr: "T" }
-output_arg { description: "1-D." name: "idx" type_attr: "out_idx" }
-output_arg {
-  description: "1-D." name: "count" type_attr: "out_idx"
-}
--}
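-
--- The `prettyprint` example above, written as a hypothetical Haskell sketch
--- (fetching all three outputs at once is assumed to be supported):
---
--- > import Data.Int (Int32)
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > uniqueExample :: IO (V.Vector Float, V.Vector Int32, V.Vector Int32)
--- > uniqueExample = TF.runSession $
--- >     TF.run $ uniqueWithCounts (TF.vector [1, 1, 2, 4, 4, 4, 7, 8, 8 :: Float])
--- > -- expected: y = [1, 2, 4, 7, 8], idx = [0, 0, 1, 2, 2, 2, 3, 4, 4],
--- > --           count = [2, 1, 3, 1, 2]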
-
--- | Returns element-wise remainder of division.
---
--- This emulates C semantics in that the result here is consistent with a
--- truncating divide, e.g. `truncate(x / y) * y + truncate_mod(x, y) = x`.
--- 
--- *NOTE*: `Mod` supports broadcasting. More about broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-truncateMod :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                       Data.Int.Int64, Double,
-                                                       Float] t) =>
-               Tensor v1 t -- ^ __x__
-               -> Tensor v2 t -- ^ __y__
-               -> Tensor Value t -- ^ __z__
-truncateMod x y | eqLengthGuard [] =
-    buildOp (opDef "TruncateMod"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
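-
--- A sketch showing the truncating (C-style) behaviour on a negative operand
--- (helpers assumed from TensorFlow.Ops / TensorFlow.Core):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > modExample :: IO (V.Vector Float)
--- > modExample = TF.runSession $
--- >     TF.run $ truncateMod (TF.vector [7, -7 :: Float]) (TF.vector [5, 5])
--- > -- expected: [2.0, -2.0], since truncate (-7 / 5) = -1 and -7 - (-1) * 5 = -2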
-
--- | Returns the gradient of `StridedSlice`.
---
--- Since `StridedSlice` cuts out pieces of its `input` which is size
--- `shape`, its gradient will have the same shape (which is passed here
--- as `shape`). The gradient will be zero in any element that the slice
--- does not select.
--- 
--- Arguments are the same as StridedSliceGrad with the exception that
--- `dy` is the input gradient to be propagated and `shape` is the
--- shape of `StridedSlice`'s `input`.
-stridedSliceGrad :: forall v1 v2 v3 v4 v5 t index . (TensorType t,
-                                                     TensorType index,
-                                                     OneOf '[Data.Int.Int32,
-                                                             Data.Int.Int64] index) =>
-                    Tensor v1 index -- ^ __shape__
-                    -> Tensor v2 index -- ^ __begin__
-                    -> Tensor v3 index -- ^ __end__
-                    -> Tensor v4 index -- ^ __strides__
-                    -> Tensor v5 t -- ^ __dy__
-                    -> Tensor Value t -- ^ __output__
-stridedSliceGrad shape begin end strides dy | eqLengthGuard [] =
-    buildOp (opDef "StridedSliceGrad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Index" .~ tensorType (undefined :: index))
-        shape begin end strides dy
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Index"
-  type: "type"
-}
-attr { default_value { i: 0 } name: "begin_mask" type: "int" }
-attr { default_value { i: 0 } name: "end_mask" type: "int" }
-attr { default_value { i: 0 } name: "ellipsis_mask" type: "int" }
-attr { default_value { i: 0 } name: "new_axis_mask" type: "int" }
-attr {
-  default_value { i: 0 } name: "shrink_axis_mask" type: "int"
-}
-input_arg { name: "shape" type_attr: "Index" }
-input_arg { name: "begin" type_attr: "Index" }
-input_arg { name: "end" type_attr: "Index" }
-input_arg { name: "strides" type_attr: "Index" }
-input_arg { name: "dy" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Performs fractional average pooling on the input.
---
--- Fractional average pooling is similar to Fractional max pooling in the pooling
--- region generation step. The only difference is that after pooling regions are
--- generated, a mean operation is performed instead of a max operation in each
--- pooling region.
-fractionalAvgPool :: forall v1 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                          Data.Int.Int64,
-                                                          Double, Float] t) =>
-                     Tensor v1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`.
-                     -> (Tensor Value t, Tensor Value Data.Int.Int64,
-                         Tensor Value Data.Int.Int64)
-                     -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__)
-                     --
-                     -- * __output__: output tensor after fractional avg pooling.
-                     --
-                     -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient.
-                     --
-                     -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient.
-fractionalAvgPool value | eqLengthGuard [] =
-    buildOp (opDef "FractionalAvgPool"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        value
-{-
-attr {
-  description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively."
-  has_minimum: true
-  minimum: 4
-  name: "pooling_ratio"
-  type: "list(float)"
-}
-attr {
-  default_value { b: false }
-  description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random."
-  name: "pseudo_random"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index  0  1  2  3  4`\n\n`value  20 5  16 3  7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling."
-  name: "overlapping"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. Mainly used\nin unit test to make FractionalAvgPool deterministic."
-  name: "deterministic"
-  type: "bool"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "value"
-  type_attr: "T"
-}
-output_arg {
-  description: "output tensor after fractional avg pooling."
-  name: "output"
-  type_attr: "T"
-}
-output_arg {
-  description: "row pooling sequence, needed to calculate gradient."
-  name: "row_pooling_sequence"
-  type: DT_INT64
-}
-output_arg {
-  description: "column pooling sequence, needed to calculate gradient."
-  name: "col_pooling_sequence"
-  type: DT_INT64
-}
--}
-
--- | Extracts the average sparse gradient in the given SparseConditionalAccumulator,
---
--- provided that sufficient (i.e., more than num_required) gradients have been
--- accumulated. The op blocks until sufficient gradients have been
--- accumulated. If the accumulator has already aggregated more than num_required
--- gradients, it will return its average of the accumulated gradients.
--- Also automatically increments the recorded global_step in the accumulator by 1,
--- and resets the aggregate to 0.
-sparseAccumulatorTakeGradient :: forall v2 dtype . (TensorType dtype,
-                                                    OneOf '[(Data.Complex.Complex Double),
-                                                            (Data.Complex.Complex Float),
-                                                            Data.Int.Int16,
-                                                            Data.Int.Int32,
-                                                            Data.Int.Int64,
-                                                            Data.Int.Int8,
-                                                            Data.Word.Word16,
-                                                            Data.Word.Word8,
-                                                            Double,
-                                                            Float] dtype) =>
-                                 Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a SparseConditionalAccumulator.
-                                 -> Tensor v2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate.
-                                 -> Build ((Tensor Value Data.Int.Int64,
-                                            Tensor Value dtype,
-                                            Tensor Value Data.Int.Int64))
-                                 -- ^ (__indices__, __values__, __shape__)
-                                 --
-                                 -- * __indices__: Indices of the average of the accumulated sparse gradients.
-                                 --
-                                 -- * __values__: Values of the average of the accumulated sparse gradients.
-                                 --
-                                 -- * __shape__: Shape of the average of the accumulated sparse gradients.
-sparseAccumulatorTakeGradient handle num_required | eqLengthGuard [] =
-    buildOp (opDef "SparseAccumulatorTakeGradient"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype))
-        handle num_required
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
-  name: "dtype"
-  type: "type"
-}
-input_arg {
-  description: "The handle to a SparseConditionalAccumulator."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Number of gradients required before we return an aggregate."
-  name: "num_required"
-  type: DT_INT32
-}
-output_arg {
-  description: "Indices of the average of the accumulated sparse gradients."
-  name: "indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "Values of the average of the accumulated sparse gradients."
-  name: "values"
-  type_attr: "dtype"
-}
-output_arg {
-  description: "Shape of the average of the accumulated sparse gradients."
-  name: "shape"
-  type: DT_INT64
-}
--}
-
--- | Convert JSON-encoded Example records to binary protocol buffer strings.
---
--- This op translates a tensor containing Example records, encoded using
--- the [standard JSON
--- mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
--- into a tensor containing the same records encoded as binary protocol
--- buffers. The resulting tensor can then be fed to any of the other
--- Example-parsing ops.
-decodeJSONExample :: Tensor v1 Data.ByteString.ByteString -- ^ __json_examples__: Each string is a JSON object serialized according to the JSON
-                                                          -- mapping of the Example proto.
-                     -> Tensor Value Data.ByteString.ByteString -- ^ __binary_examples__: Each string is a binary Example protocol buffer corresponding
-                     -- to the respective element of `json_examples`.
-decodeJSONExample json_examples | eqLengthGuard [] =
-    buildOp (opDef "DecodeJSONExample")
-        json_examples
-{-
-input_arg {
-  description: "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto."
-  name: "json_examples"
-  type: DT_STRING
-}
-output_arg {
-  description: "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`."
-  name: "binary_examples"
-  type: DT_STRING
-}
--}
-
--- | A placeholder op that passes through `input` when its output is not fed.
-
-placeholderWithDefault :: forall v1 dtype . (TensorType dtype) =>
-                          Shape -- ^ __shape__: The (possibly partial) shape of the tensor.
-                          -> Tensor v1 dtype -- ^ __input__: The default value to produce when `output` is not fed.
-                          -> Tensor Value dtype -- ^ __output__: A placeholder tensor that defaults to `input` if it is not fed.
-placeholderWithDefault shape input | eqLengthGuard [] =
-    buildOp (opDef "PlaceholderWithDefault"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "shape" .~ shape)
-        input
-{-
-attr {
-  description: "The type of elements in the tensor."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  description: "The (possibly partial) shape of the tensor."
-  name: "shape"
-  type: "shape"
-}
-input_arg {
-  description: "The default value to produce when `output` is not fed."
-  name: "input"
-  type_attr: "dtype"
-}
-output_arg {
-  description: "A placeholder tensor that defaults to `input` if it is not fed."
-  name: "output"
-  type_attr: "dtype"
-}
--}
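-
--- A sketch of the default path (no feed supplied), assuming `vector` from
--- TensorFlow.Ops; feeding the placeholder would instead go through
--- TensorFlow.Core's feed machinery:
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > defaultExample :: IO (V.Vector Float)
--- > defaultExample = TF.runSession $
--- >     TF.run $ placeholderWithDefault (TF.Shape [2]) (TF.vector [1, 2 :: Float])
--- > -- with nothing fed, expected: [1.0, 2.0]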
-
--- | Update '*var' according to the Ftrl-proximal scheme.
---
--- accum_new = accum + grad * grad
--- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
--- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
--- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
--- accum = accum_new
-applyFtrl :: forall v4 v5 v6 v7 v8 t . (TensorType t,
-                                        OneOf '[(Data.Complex.Complex Double),
-                                                (Data.Complex.Complex Float),
-                                                Data.Int.Int16, Data.Int.Int32,
-                                                Data.Int.Int64, Data.Int.Int8,
-                                                Data.Word.Word16,
-                                                Data.Word.Word8, Double,
-                                                Float] t) =>
-             Tensor Ref t -- ^ __var__: Should be from a Variable().
-             -> Tensor Ref t -- ^ __accum__: Should be from a Variable().
-             -> Tensor Ref t -- ^ __linear__: Should be from a Variable().
-             -> Tensor v4 t -- ^ __grad__: The gradient.
-             -> Tensor v5 t -- ^ __lr__: Scaling factor. Must be a scalar.
-             -> Tensor v6 t -- ^ __l1__: L1 regularization. Must be a scalar.
-             -> Tensor v7 t -- ^ __l2__: L2 regularization. Must be a scalar.
-             -> Tensor v8 t -- ^ __lr_power__: Scaling factor. Must be a scalar.
-             -> Build (Tensor Ref t) -- ^ __out__: Same as "var".
-applyFtrl var accum linear grad lr l1 l2 lr_power | eqLengthGuard [] =
-    buildOp (opDef "ApplyFtrl"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        var accum linear grad lr l1 l2 lr_power
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "var"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "accum"
-  type_attr: "T"
-}
-input_arg {
-  description: "Should be from a Variable()."
-  is_ref: true
-  name: "linear"
-  type_attr: "T"
-}
-input_arg {
-  description: "The gradient." name: "grad" type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr"
-  type_attr: "T"
-}
-input_arg {
-  description: "L1 regulariation. Must be a scalar."
-  name: "l1"
-  type_attr: "T"
-}
-input_arg {
-  description: "L2 regulariation. Must be a scalar."
-  name: "l2"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scaling factor. Must be a scalar."
-  name: "lr_power"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same as \"var\"."
-  is_ref: true
-  name: "out"
-  type_attr: "T"
-}
--}
-
--- | Applies L1 regularization shrink step on the parameters.
-
-sdcaShrinkL1 :: Float -- ^ __l1__: Symmetric l1 regularization strength.
-                -> Float -- ^ __l2__: Symmetric l2 regularization strength. Should be a positive float.
-                -> [Tensor Ref Float] -- ^ __weights__: a list of vectors where each value is the weight associated with a
-                                      -- feature group.
-                -> Build (ControlNode)
-sdcaShrinkL1 l1 l2
-             weights | eqLengthGuard [("num_features", [("weights", length weights)])] =
-    buildOp (opDef "SdcaShrinkL1"
-             & opAttr "l1" .~ l1
-             & opAttr "l2" .~ l2
-             & opAttr "num_features" .~ num_features)
-        weights
-  where
-    num_features = fromIntegral (length weights) :: Int64
-{-
-attr {
-  description: "Number of feature groups to apply shrinking step."
-  has_minimum: true
-  name: "num_features"
-  type: "int"
-}
-attr {
-  description: "Symmetric l1 regularization strength."
-  name: "l1"
-  type: "float"
-}
-attr {
-  description: "Symmetric l2 regularization strength. Should be a positive float."
-  name: "l2"
-  type: "float"
-}
-input_arg {
-  description: "a list of vectors where each value is the weight associated with a\nfeature group."
-  is_ref: true
-  name: "weights"
-  number_attr: "num_features"
-  type: DT_FLOAT
-}
--}
-
--- | Generate a sharded filename. The filename is printf formatted as
---
---    %s-%05d-of-%05d, basename, shard, num_shards.
-shardedFilename :: Tensor v1 Data.ByteString.ByteString -- ^ __basename__
-                   -> Tensor v2 Data.Int.Int32 -- ^ __shard__
-                   -> Tensor v3 Data.Int.Int32 -- ^ __num_shards__
-                   -> Tensor Value Data.ByteString.ByteString -- ^ __filename__
-shardedFilename basename shard num_shards | eqLengthGuard [] =
-    buildOp (opDef "ShardedFilename")
-        basename shard num_shards
-{-
-input_arg { name: "basename" type: DT_STRING }
-input_arg { name: "shard" type: DT_INT32 }
-input_arg { name: "num_shards" type: DT_INT32 }
-output_arg { name: "filename" type: DT_STRING }
--}
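-
--- A sketch of the printf behaviour (hypothetical; `scalar` is assumed from
--- TensorFlow.Ops):
---
--- > import Data.Int (Int32)
--- > import qualified Data.ByteString.Char8 as B8
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > filenameExample :: IO (V.Vector B8.ByteString)
--- > filenameExample = TF.runSession $
--- >     TF.run $ shardedFilename (TF.scalar (B8.pack "data"))
--- >                              (TF.scalar (2 :: Int32))
--- >                              (TF.scalar 8)
--- > -- expected: ["data-00002-of-00008"]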
-
--- | Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
---
--- Attributes [min; max] define the clamping range for the 'inputs' data.  Op
--- divides this range into 255 steps (total of 256 values), then replaces each
--- 'inputs' value with the closest of the quantized step values.
--- 
--- Quantization is called fake since the output is still in floating point.
-fakeQuantWithMinMaxArgs :: Tensor v1 Float -- ^ __inputs__
-                           -> Tensor Value Float -- ^ __outputs__
-fakeQuantWithMinMaxArgs inputs | eqLengthGuard [] =
-    buildOp (opDef "FakeQuantWithMinMaxArgs")
-        inputs
-{-
-attr { default_value { f: -6.0 } name: "min" type: "float" }
-attr { default_value { f: 6.0 } name: "max" type: "float" }
-input_arg { name: "inputs" type: DT_FLOAT }
-output_arg { name: "outputs" type: DT_FLOAT }
--}
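-
--- A sketch of quantizing with the default `[min; max]` range of [-6, 6]
--- (helpers assumed as in the other sketches; the exact step values depend on
--- the 255-step grid, so only the clamping is spelled out):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > quantExample :: IO (V.Vector Float)
--- > quantExample = TF.runSession $
--- >     TF.run $ fakeQuantWithMinMaxArgs (TF.vector [-8, 0, 8 :: Float])
--- > -- -8 and 8 fall outside [-6, 6] and clamp to (approximately) -6 and 6;
--- > -- every output is snapped to one of the 256 quantized step values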
-
--- | Applies sparse addition between `updates` and individual values or slices
---
--- within a given variable according to `indices`.
--- 
--- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
--- 
--- `indices` must be integer tensor, containing indices into `ref`.
--- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
--- 
--- The innermost dimension of `indices` (with length `K`) corresponds to
--- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
--- dimension of `ref`.
--- 
--- `updates` is `Tensor` of rank `Q-1+P-K` with shape:
--- 
--- ```
--- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
--- ```
--- 
--- For example, say we want to add 4 scattered elements to a rank-1 tensor
--- with 8 elements. In Python, that addition would look like this:
--- 
---     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
---     indices = tf.constant([[4], [3], [1], [7]])
---     updates = tf.constant([9, 10, 11, 12])
---     add = tf.scatter_nd_add(ref, indices, updates)
---     with tf.Session() as sess:
---       print sess.run(add)
--- 
--- The resulting update to ref would look like this:
--- 
---     [1, 13, 3, 14, 14, 6, 7, 20]
--- 
--- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
--- slices.
-scatterNdAdd :: forall v2 v3 t tindices . (TensorType t,
-                                           OneOf '[(Data.Complex.Complex Double),
-                                                   (Data.Complex.Complex Float),
-                                                   Data.Int.Int16,
-                                                   Data.Int.Int32,
-                                                   Data.Int.Int64,
-                                                   Data.Int.Int8,
-                                                   Data.Word.Word16,
-                                                   Data.Word.Word8, Double,
-                                                   Float] t,
-                                           TensorType tindices,
-                                           OneOf '[Data.Int.Int32,
-                                                   Data.Int.Int64] tindices) =>
-                Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node.
-                -> Tensor v2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
-                                      -- A tensor of indices into ref.
-                -> Tensor v3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values
-                               -- to add to ref.
-                -> Build (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want
-                -- to use the updated values after the update is done.
-scatterNdAdd ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterNdAdd"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "A mutable Tensor. Should be from a Variable node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
-
--- | Returns the number of gradients aggregated in the given accumulators.
-
-accumulatorNumAccumulated :: Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
-                             -> Build (Tensor Value Data.Int.Int32) -- ^ __num_accumulated__: The number of gradients aggregated in the given accumulator.
-accumulatorNumAccumulated handle | eqLengthGuard [] =
-    buildOp (opDef "AccumulatorNumAccumulated")
-        handle
-{-
-input_arg {
-  description: "The handle to an accumulator."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
-output_arg {
-  description: "The number of gradients aggregated in the given accumulator."
-  name: "num_accumulated"
-  type: DT_INT32
-}
--}
-
--- | Computes the sum along sparse segments of a tensor divided by the sqrt of N.
---
--- N is the size of the segment being reduced.
--- 
--- Read [the section on
--- Segmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation
--- of segments.
-sparseSegmentSqrtN :: forall v1 v2 v3 t tidx . (TensorType t, OneOf '[Double,
-                                                                      Float] t,
-                                                TensorType tidx,
-                                                OneOf '[Data.Int.Int32,
-                                                        Data.Int.Int64] tidx) =>
-                      Tensor v1 t -- ^ __data__
-                      -> Tensor v2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`.
-                      -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
-                      -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-                      -- has size `k`, the number of segments.
-sparseSegmentSqrtN data' indices segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SparseSegmentSqrtN"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        data' indices segment_ids
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor. Has same rank as `segment_ids`."
-  name: "indices"
-  type_attr: "Tidx"
-}
-input_arg {
-  description: "A 1-D tensor. Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type: DT_INT32
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | DepthToSpace for tensors of type T.
---
--- Rearranges data from depth into blocks of spatial data.
--- This is the reverse transformation of SpaceToDepth. More specifically,
--- this op outputs a copy of the input tensor where values from the `depth`
--- dimension are moved in spatial blocks to the `height` and `width` dimensions.
--- The attr `block_size` indicates the input block size and how the data is moved.
--- 
---   * Chunks of data of size `block_size * block_size` from depth are rearranged
---     into non-overlapping blocks of size `block_size x block_size`
---   * The width of the output tensor is `input_width * block_size`, whereas
---     the height is `input_height * block_size`.
---   * The depth of the input tensor must be divisible by
---     `block_size * block_size`.
--- 
--- That is, assuming the input is in the shape:
--- `[batch, height, width, depth]`,
--- the shape of the output will be:
--- `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`
--- 
--- This operation requires that the input tensor be of rank 4, and that
--- `block_size` be >=1 and that `block_size * block_size` be a divisor of the
--- input depth.
--- 
--- This operation is useful for resizing the activations between convolutions
--- (but keeping all data), e.g. instead of pooling. It is also useful for training
--- purely convolutional models.
--- 
--- For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:
--- 
--- ```prettyprint
--- x = [[[[1, 2, 3, 4]]]]
--- 
--- ```
--- 
--- This operation will output a tensor of shape `[1, 2, 2, 1]`:
--- 
--- ```prettyprint
---    [[[[1], [2]],
---      [[3], [4]]]]
--- ```
--- 
--- Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
--- the corresponding output will have 2x2 elements and will have a depth of
--- 1 channel (1 = `4 / (block_size * block_size)`).
--- The output element shape is `[2, 2, 1]`.
--- 
--- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
--- 
--- ```prettyprint
--- x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
--- ```
--- 
--- This operation, for block size of 2, will return the following tensor of shape
--- `[1, 2, 2, 3]`
--- 
--- ```prettyprint
---    [[[[1, 2, 3], [4, 5, 6]],
---      [[7, 8, 9], [10, 11, 12]]]]
--- 
--- ```
--- 
--- Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
--- 
--- ```prettyprint
--- x =  [[[[1, 2, 3, 4],
---        [5, 6, 7, 8]],
---       [[9, 10, 11, 12],
---        [13, 14, 15, 16]]]]
--- ```
--- 
--- the operator will return the following tensor of shape `[1 4 4 1]`:
--- 
--- ```prettyprint
--- x = [[ [1],   [2],  [5],  [6]],
---      [ [3],   [4],  [7],  [8]],
---      [ [9],  [10], [13],  [14]],
---      [ [11], [12], [15],  [16]]]
--- 
--- ```
-depthToSpace :: forall v1 t . (TensorType t) =>
-                Data.Int.Int64 -- ^ __block_size__: The size of the spatial block, same as in Space2Depth.
-                -> Tensor v1 t -- ^ __input__
-                -> Tensor Value t -- ^ __output__
-depthToSpace block_size input | eqLengthGuard [] =
-    buildOp (opDef "DepthToSpace"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "block_size" .~ block_size)
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "The size of the spatial block, same as in Space2Depth."
-  has_minimum: true
-  minimum: 2
-  name: "block_size"
-  type: "int"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
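-
--- The first `prettyprint` example above as a hypothetical Haskell sketch
--- (`constant` assumed from TensorFlow.Ops):
---
--- > import qualified Data.Vector as V
--- > import qualified TensorFlow.Core as TF
--- > import qualified TensorFlow.Ops as TF
--- >
--- > d2sExample :: IO (V.Vector Float)
--- > d2sExample = TF.runSession $
--- >     TF.run $ depthToSpace 2 (TF.constant (TF.Shape [1, 1, 1, 4])
--- >                                          [1, 2, 3, 4 :: Float])
--- > -- expected: a [1, 2, 2, 1] tensor, fetched row-major as [1.0, 2.0, 3.0, 4.0]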
-
--- | Generates labels for candidate sampling with a learned unigram distribution.
---
--- See explanations of candidate sampling and the data formats at
--- go/candidate-sampling.
--- 
--- For each batch, this op picks a single set of sampled candidate labels.
--- 
--- The advantages of sampling candidates per-batch are simplicity and the
--- possibility of efficient dense matrix multiplication. The disadvantage is that
--- the sampled candidates must be chosen independently of the context and of the
--- true labels.
-allCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to produce per batch.
-                       -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
-                       -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
-                               -- candidates in a batch are unique. This requires some approximation to
-                               -- estimate the post-rejection sampling probabilities.
-                       -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
-                                                   -- IDs of the num_true target_classes in the corresponding original label.
-                       -> (Tensor Value Data.Int.Int64, Tensor Value Float,
-                           Tensor Value Float)
-                       -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
-                       --
-                       -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
-                       -- the ID of a sampled candidate.
-                       --
-                       -- * __true_expected_count__: A batch_size * num_true matrix, representing
-                       -- the number of times each candidate is expected to occur in a batch
-                       -- of sampled candidates. If unique=true, then this is a probability.
-                       --
-                       -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
-                       -- candidate representing the number of times the candidate is expected
-                       -- to occur in a batch of sampled candidates.  If unique=true, then this is a
-                       -- probability.
-allCandidateSampler num_sampled num_true unique
-                    true_classes | eqLengthGuard [] =
-    buildOp (opDef "AllCandidateSampler"
-             & opAttr "num_sampled" .~ num_sampled
-             & opAttr "num_true" .~ num_true
-             & opAttr "unique" .~ unique)
-        true_classes
-{-
-attr {
-  description: "Number of true labels per context."
-  has_minimum: true
-  minimum: 1
-  name: "num_true"
-  type: "int"
-}
-attr {
-  description: "Number of candidates to produce per batch."
-  has_minimum: true
-  minimum: 1
-  name: "num_sampled"
-  type: "int"
-}
-attr {
-  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
-  name: "unique"
-  type: "bool"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
-  name: "true_classes"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
-  name: "sampled_candidates"
-  type: DT_INT64
-}
-output_arg {
-  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
-  name: "true_expected_count"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
-  name: "sampled_expected_count"
-  type: DT_FLOAT
-}
--}
-
--- | Computes the gradient of nearest neighbor interpolation.
-
-resizeNearestNeighborGrad :: forall v1 v2 t . (TensorType t,
-                                               OneOf '[Data.Int.Int32,
-                                                       Data.Int.Int8,
-                                                       Data.Word.Word16,
-                                                       Data.Word.Word8, Double,
-                                                       Float] t) =>
-                             Tensor v1 t -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`.
-                             -> Tensor v2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
-                                                         -- original input size.
-                             -> Tensor Value t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
-                             -- with respect to the input image.
-resizeNearestNeighborGrad grads size | eqLengthGuard [] =
-    buildOp (opDef "ResizeNearestNeighborGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        grads size
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT32
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension."
-  name: "align_corners"
-  type: "bool"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "grads"
-  type_attr: "T"
-}
-input_arg {
-  description: "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size."
-  name: "size"
-  type: DT_INT32
-}
-output_arg {
-  description: "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Performs greedy decoding on the logits given in inputs.
---
--- A note about the attribute merge_repeated: if enabled, when
--- consecutive logits' maximum indices are the same, only the first of
--- these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
--- becomes "A B" if merge_repeated = True and "A B B B B" if
--- merge_repeated = False.
--- 
--- Regardless of the value of merge_repeated, if the maximum index of a given
--- time and batch corresponds to the blank, index `(num_classes - 1)`, no new
--- element is emitted.
-cTCGreedyDecoder :: Tensor v1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-                    -> Tensor v2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch_size)`.
-                    -> (Tensor Value Data.Int.Int64,
-                        Tensor Value Data.Int.Int64,
-                        Tensor Value Data.Int.Int64, Tensor Value Float)
-                    -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__)
-                    --
-                    -- * __decoded_indices__: Indices matrix, size `(total_decoded_outputs x 2)`,
-                    -- of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
-                    --
-                    -- * __decoded_values__: Values vector, size: `(total_decoded_outputs)`,
-                    -- of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
-                    --
-                    -- * __decoded_shape__: Shape vector, size `(2)`, of the decoded SparseTensor.
-                    -- Values are: `[batch_size, max_decoded_length]`.
-                    --
-                    -- * __log_probability__: Matrix, size `(batch_size x 1)`, containing sequence
-                    -- log-probabilities.
-cTCGreedyDecoder inputs sequence_length | eqLengthGuard [] =
-    buildOp (opDef "CTCGreedyDecoder")
-        inputs sequence_length
-{-
-attr {
-  default_value { b: false }
-  description: "If True, merge repeated classes in output."
-  name: "merge_repeated"
-  type: "bool"
-}
-input_arg {
-  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
-  name: "inputs"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "A vector containing sequence lengths, size `(batch_size)`."
-  name: "sequence_length"
-  type: DT_INT32
-}
-output_arg {
-  description: "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor<int64, 2>`.  The rows store: [batch, time]."
-  name: "decoded_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor<int64, 2>`.  The vector stores the decoded classes."
-  name: "decoded_values"
-  type: DT_INT64
-}
-output_arg {
-  description: "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`."
-  name: "decoded_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities."
-  name: "log_probability"
-  type: DT_FLOAT
-}
--}
-
--- | L2 Loss.
---
--- Computes half the L2 norm of a tensor without the `sqrt`:
--- 
---     output = sum(t ** 2) / 2
-l2Loss :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                               (Data.Complex.Complex Float),
-                                               Data.Int.Int16, Data.Int.Int32,
-                                               Data.Int.Int64, Data.Int.Int8,
-                                               Data.Word.Word16,
-                                               Data.Word.Word8, Double,
-                                               Float] t) =>
-          Tensor v1 t -- ^ __t__: Typically 2-D, but may have any dimensions.
-          -> Tensor Value t -- ^ __output__: 0-D.
-l2Loss t | eqLengthGuard [] =
-    buildOp (opDef "L2Loss"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        t
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Typically 2-D, but may have any dimensions."
-  name: "t"
-  type_attr: "T"
-}
-output_arg { description: "0-D." name: "output" type_attr: "T" }
--}
-
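--- A worked instance of the formula above (a sketch, not from the generated
--- docs), assuming `vector` from TensorFlow.Ops is in scope: for t = [3, 4],
--- output = (3^2 + 4^2) / 2 = 25 / 2 = 12.5.
---
--- > l2LossDemo :: Tensor Value Float
--- > l2LossDemo = l2Loss (vector [3, 4 :: Float])
-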
--- | Computes the maximum along segments of a tensor.
---
--- Read [the section on Segmentation](../../api_docs/python/math_ops.md#segmentation)
--- for an explanation of segments.
--- 
--- Computes a tensor such that
--- \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
--- that `segment_ids[j] == i`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/SegmentMax.png" alt>
--- </div>
-segmentMax :: forall v1 v2 t tindices . (TensorType t, OneOf '[Data.Int.Int16,
-                                                               Data.Int.Int32,
-                                                               Data.Int.Int64,
-                                                               Data.Int.Int8,
-                                                               Data.Word.Word16,
-                                                               Data.Word.Word8,
-                                                               Double, Float] t,
-                                         TensorType tindices,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tindices) =>
-              Tensor v1 t -- ^ __data__
-              -> Tensor v2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s
-                                    -- first dimension.  Values should be sorted and can be repeated.
-              -> Tensor Value t -- ^ __output__: Has same shape as data, except for dimension 0 which
-              -- has size `k`, the number of segments.
-segmentMax data' segment_ids | eqLengthGuard [] =
-    buildOp (opDef "SegmentMax"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        data' segment_ids
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension.  Values should be sorted and can be repeated."
-  name: "segment_ids"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments."
-  name: "output"
-  type_attr: "T"
-}
--}
-
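--- A worked instance of the formula above (a sketch, assuming `vector` from
--- TensorFlow.Ops is in scope): with data = [1, 3, 2, 5, 4] and
--- segment_ids = [0, 0, 1, 1, 1] there are k = 2 segments, so
--- output = [max(1, 3), max(2, 5, 4)] = [3, 5].
---
--- > segmentMaxDemo :: Tensor Value Float
--- > segmentMaxDemo = segmentMax (vector [1, 3, 2, 5, 4 :: Float])
--- >                             (vector [0, 0, 1, 1, 1 :: Data.Int.Int32])
-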
--- | Increments 'ref' until it reaches 'limit'.
-
-countUpTo :: forall t . (TensorType t, OneOf '[Data.Int.Int32,
-                                               Data.Int.Int64] t) =>
-             Data.Int.Int64 -- ^ __limit__: If incrementing ref would bring it above limit, instead generates an
-                            -- 'OutOfRange' error.
-             -> Tensor Ref t -- ^ __ref__: Should be from a scalar `Variable` node.
-             -> Build (Tensor Value t) -- ^ __output__: A copy of the input before increment. If nothing else modifies the
-             -- input, the values produced will all be distinct.
-countUpTo limit ref | eqLengthGuard [] =
-    buildOp (opDef "CountUpTo"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "limit" .~ limit)
-        ref
-{-
-attr {
-  description: "If incrementing ref would bring it above limit, instead generates an\n\'OutOfRange\' error."
-  name: "limit"
-  type: "int"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Should be from a scalar `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-output_arg {
-  description: "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct."
-  name: "output"
-  type_attr: "T"
-}
--}
-
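--- A hypothetical usage sketch, assuming `initializedVariable` and `scalar`
--- from TensorFlow.Ops are in scope: successive runs of the fetched tensor
--- yield 0, 1, 2 and then an 'OutOfRange' error once the counter reaches the
--- limit.
---
--- > counterDemo :: Build (Tensor Value Data.Int.Int32)
--- > counterDemo = do
--- >     ref <- initializedVariable (scalar (0 :: Data.Int.Int32))
--- >     countUpTo 3 ref
-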
--- | A Reader that outputs the records from a TensorFlow Records file.
-
-tFRecordReader :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
-tFRecordReader  | eqLengthGuard [] =
-    buildOp (opDef "TFRecordReader")
-        
-{-
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
-  name: "shared_name"
-  type: "string"
-}
-attr {
-  default_value { s: "" } name: "compression_type" type: "string"
-}
-output_arg {
-  description: "The handle to reference the Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
--}
-
--- | Forwards `data` to the output port determined by `pred`.
---
--- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
--- the data goes to `output_false`.
--- 
--- See also `RefSwitch` and `Merge`.
-switch :: forall v1 v2 t . (TensorType t) =>
-          Tensor v1 t -- ^ __data__: The tensor to be forwarded to the appropriate output.
-          -> Tensor v2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
-          -> (Tensor Value t, Tensor Value t)
-          -- ^ (__output_false__, __output_true__)
-          --
-          -- * __output_false__: If `pred` is false, data will be forwarded to this output.
-          --
-          -- * __output_true__: If `pred` is true, data will be forwarded to this output.
-switch data' pred | eqLengthGuard [] =
-    buildOp (opDef "Switch"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data' pred
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The tensor to be forwarded to the appropriate output."
-  name: "data"
-  type_attr: "T"
-}
-input_arg {
-  description: "A scalar that specifies which output port will receive data."
-  name: "pred"
-  type: DT_BOOL
-}
-output_arg {
-  description: "If `pred` is false, data will be forwarded to this output."
-  name: "output_false"
-  type_attr: "T"
-}
-output_arg {
-  description: "If `pred` is true, data will be forwarded to this output."
-  name: "output_true"
-  type_attr: "T"
-}
--}
-
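--- A minimal routing sketch: only the port selected by `pred` produces a
--- value at run time; here we keep just the true port.
---
--- > onlyTrue :: TensorType t => Tensor v1 t -> Tensor v2 Bool -> Tensor Value t
--- > onlyTrue x p = snd (switch x p)
-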
--- | Computes gradients for SparseSegmentMean.
---
--- Returns tensor "output" with same shape as grad, except for dimension 0 whose
--- value is output_dim0.
-sparseSegmentMeanGrad :: forall v1 v2 v3 v4 t tidx . (TensorType t,
-                                                      OneOf '[Double, Float] t,
-                                                      TensorType tidx,
-                                                      OneOf '[Data.Int.Int32,
-                                                              Data.Int.Int64] tidx) =>
-                         Tensor v1 t -- ^ __grad__: gradient propagated to the SparseSegmentMean op.
-                         -> Tensor v2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentMean op.
-                         -> Tensor v3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentMean op.
-                         -> Tensor v4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentMean op.
-                         -> Tensor Value t -- ^ __output__
-sparseSegmentMeanGrad grad indices segment_ids output_dim0 | eqLengthGuard [] =
-    buildOp (opDef "SparseSegmentMeanGrad"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        grad indices segment_ids output_dim0
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "gradient propagated to the SparseSegmentMean op."
-  name: "grad"
-  type_attr: "T"
-}
-input_arg {
-  description: "indices passed to the corresponding SparseSegmentMean op."
-  name: "indices"
-  type_attr: "Tidx"
-}
-input_arg {
-  description: "segment_ids passed to the corresponding SparseSegmentMean op."
-  name: "segment_ids"
-  type: DT_INT32
-}
-input_arg {
-  description: "dimension 0 of \"data\" passed to SparseSegmentMean op."
-  name: "output_dim0"
-  type: DT_INT32
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Gather values or slices from `params` according to `indices`.
---
--- `params` is a Tensor of rank `P` and `indices` is a Tensor of rank `Q`.
--- 
--- `indices` must be integer tensor, containing indices into `params`.
--- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
--- 
--- The innermost dimension of `indices` (with length `K`) corresponds to
--- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
--- dimension of `params`.
--- 
--- Produces an output tensor with shape
--- 
--- ```
--- [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]].
--- ```
--- 
--- Some examples below.
--- 
--- Simple indexing into a matrix:
--- 
--- ```python
---     indices = [[0, 0], [1, 1]]
---     params = [['a', 'b'], ['c', 'd']]
---     output = ['a', 'd']
--- ```
--- 
--- Slice indexing into a matrix:
--- 
--- ```python
---     indices = [[1], [0]]
---     params = [['a', 'b'], ['c', 'd']]
---     output = [['c', 'd'], ['a', 'b']]
--- ```
--- 
--- Indexing into a 3-tensor:
--- 
--- ```python
---     indices = [[1]]
---     params = [[['a0', 'b0'], ['c0', 'd0']],
---               [['a1', 'b1'], ['c1', 'd1']]]
---     output = [[['a1', 'b1'], ['c1', 'd1']]]
--- 
--- 
---     indices = [[0, 1], [1, 0]]
---     params = [[['a0', 'b0'], ['c0', 'd0']],
---               [['a1', 'b1'], ['c1', 'd1']]]
---     output = [['c0', 'd0'], ['a1', 'b1']]
--- 
--- 
---     indices = [[0, 0, 1], [1, 0, 1]]
---     params = [[['a0', 'b0'], ['c0', 'd0']],
---               [['a1', 'b1'], ['c1', 'd1']]]
---     output = ['b0', 'b1']
--- ```
--- 
--- Batched indexing into a matrix:
--- 
--- ```python
---     indices = [[[0, 0]], [[0, 1]]]
---     params = [['a', 'b'], ['c', 'd']]
---     output = [['a'], ['b']]
--- ```
--- 
--- Batched slice indexing into a matrix:
--- 
--- ```python
---     indices = [[[1]], [[0]]]
---     params = [['a', 'b'], ['c', 'd']]
---     output = [[['c', 'd']], [['a', 'b']]]
--- ```
--- 
--- Batched indexing into a 3-tensor:
--- 
--- ```python
---     indices = [[[1]], [[0]]]
---     params = [[['a0', 'b0'], ['c0', 'd0']],
---               [['a1', 'b1'], ['c1', 'd1']]]
---     output = [[[['a1', 'b1'], ['c1', 'd1']]],
---               [[['a0', 'b0'], ['c0', 'd0']]]]
--- 
---     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
---     params = [[['a0', 'b0'], ['c0', 'd0']],
---               [['a1', 'b1'], ['c1', 'd1']]]
---     output = [[['c0', 'd0'], ['a1', 'b1']],
---               [['a0', 'b0'], ['c1', 'd1']]]
--- 
--- 
---     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
---     params = [[['a0', 'b0'], ['c0', 'd0']],
---               [['a1', 'b1'], ['c1', 'd1']]]
---     output = [['b0', 'b1'], ['d0', 'c1']]
--- ```
-gatherNd :: forall v1 v2 tparams tindices . (TensorType tparams,
-                                             TensorType tindices,
-                                             OneOf '[Data.Int.Int32,
-                                                     Data.Int.Int64] tindices) =>
-            Tensor v1 tparams -- ^ __params__: `P-D`.  The tensor from which to gather values.
-            -> Tensor v2 tindices -- ^ __indices__: `Q-D`.  Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.
-            -> Tensor Value tparams -- ^ __output__: `(P+Q-K-1)-D`.  Values from `params` gathered from indices given by
-            -- `indices`.
-gatherNd params indices | eqLengthGuard [] =
-    buildOp (opDef "GatherNd"
-             & opAttr "Tparams" .~ tensorType (undefined :: tparams)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        params indices
-{-
-attr { name: "Tparams" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg {
-  description: "`P-D`.  The tensor from which to gather values."
-  name: "params"
-  type_attr: "Tparams"
-}
-input_arg {
-  description: "`Q-D`.  Index tensor having shape `[d_0, ..., d_{Q-2}, K]`."
-  name: "indices"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "`(P+Q-K-1)-D`.  Values from `params` gathered from indices given by\n`indices`."
-  name: "output"
-  type_attr: "Tparams"
-}
--}
-
--- | Removes dimensions of size 1 from the shape of a tensor.
---
--- Given a tensor `input`, this operation returns a tensor of the same type with
--- all dimensions of size 1 removed. If you don't want to remove all size 1
--- dimensions, you can remove specific size 1 dimensions by specifying
--- `squeeze_dims`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
--- shape(squeeze(t)) ==> [2, 3]
--- ```
--- 
--- Or, to remove specific size 1 dimensions:
--- 
--- ```prettyprint
--- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
--- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
--- ```
-squeeze :: forall v1 t . (TensorType t) =>
-           Tensor v1 t -- ^ __input__: The `input` to squeeze.
-           -> Tensor Value t -- ^ __output__: Contains the same data as `input`, but has one or more dimensions of
-           -- size 1 removed.
-squeeze input | eqLengthGuard [] =
-    buildOp (opDef "Squeeze"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-attr {
-  default_value { list { } }
-  description: "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1."
-  has_minimum: true
-  name: "squeeze_dims"
-  type: "list(int)"
-}
-input_arg {
-  description: "The `input` to squeeze." name: "input" type_attr: "T"
-}
-output_arg {
-  description: "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Outputs random values from a uniform distribution.
---
--- The generated values follow a uniform distribution in the range `[0, 1)`. The
--- lower bound 0 is included in the range, while the upper bound 1 is excluded.
-randomUniform :: forall v1 dtype t . (TensorType dtype,
-                                      OneOf '[Data.Word.Word16, Double,
-                                              Float] dtype, TensorType t,
-                                      OneOf '[Data.Int.Int32,
-                                              Data.Int.Int64] t) =>
-                 Tensor v1 t -- ^ __shape__: The shape of the output tensor.
-                 -> Build (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with uniform random values.
-randomUniform shape | eqLengthGuard [] =
-    buildOp (opDef "RandomUniform"
-             & opAttr "dtype" .~ tensorType (undefined :: dtype)
-             & opAttr "T" .~ tensorType (undefined :: t))
-        shape
-{-
-attr {
-  default_value { i: 0 }
-  description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "A second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-attr {
-  allowed_values {
-    list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE }
-  }
-  description: "The type of the output."
-  name: "dtype"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "The shape of the output tensor."
-  name: "shape"
-  type_attr: "T"
-}
-output_arg {
-  description: "A tensor of the specified shape filled with uniform random values."
-  name: "output"
-  type_attr: "dtype"
-}
--}
-
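--- A hypothetical usage sketch, assuming `vector` from TensorFlow.Ops is in
--- scope: draw a 2x3 tensor from [0, 1). To sample from [lo, hi) instead,
--- scale and shift the result as u * (hi - lo) + lo.
---
--- > uniformDemo :: Build (Tensor Value Float)
--- > uniformDemo = randomUniform (vector [2, 3 :: Data.Int.Int32])
-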
--- | Returns up to `num_records` (key, value) pairs produced by a Reader.
---
--- Will dequeue from the input queue if necessary (e.g. when the
--- Reader needs to start reading from a new file since it has finished
--- with the previous file).
--- It may return fewer than `num_records` even before the last batch.
-readerReadUpTo :: Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a `Reader`.
-                  -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a `Queue`, with string work items.
-                  -> Tensor v3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`.
-                  -> Build ((Tensor Value Data.ByteString.ByteString,
-                             Tensor Value Data.ByteString.ByteString))
-                  -- ^ (__keys__, __values__)
-                  --
-                  -- * __keys__: A 1-D tensor.
-                  --
-                  -- * __values__: A 1-D tensor.
-readerReadUpTo reader_handle queue_handle num_records | eqLengthGuard [] =
-    buildOp (opDef "ReaderReadUpTo")
-        reader_handle queue_handle num_records
-{-
-input_arg {
-  description: "Handle to a `Reader`."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Handle to a `Queue`, with string work items."
-  is_ref: true
-  name: "queue_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "number of records to read from `Reader`."
-  name: "num_records"
-  type: DT_INT64
-}
-output_arg {
-  description: "A 1-D tensor." name: "keys" type: DT_STRING
-}
-output_arg {
-  description: "A 1-D tensor." name: "values" type: DT_STRING
-}
--}
-
--- | Computes the gradients of 3-D convolution with respect to the input.
-
-conv3DBackpropInput :: forall v1 v2 v3 t . (TensorType t,
-                                            OneOf '[(Data.Complex.Complex Double),
-                                                    (Data.Complex.Complex Float),
-                                                    Data.Int.Int16,
-                                                    Data.Int.Int32,
-                                                    Data.Int.Int64,
-                                                    Data.Int.Int8,
-                                                    Data.Word.Word16,
-                                                    Data.Word.Word8, Double,
-                                                    Float] t) =>
-                       Tensor v1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`.
-                       -> Tensor v2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`.
-                                      -- `in_channels` must match between `input` and `filter`.
-                       -> Tensor v3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-                                      -- out_channels]`.
-                       -> Tensor Value t -- ^ __output__
-conv3DBackpropInput input filter out_backprop | eqLengthGuard [] =
-    buildOp (opDef "Conv3DBackpropInput"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter out_backprop
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
-  has_minimum: true
-  minimum: 5
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg {
-  description: "Shape `[batch, depth, rows, cols, in_channels]`."
-  name: "input"
-  type_attr: "T"
-}
-input_arg {
-  description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
-  name: "filter"
-  type_attr: "T"
-}
-input_arg {
-  description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
-  name: "out_backprop"
-  type_attr: "T"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
---
--- Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
--- and a filter / kernel tensor of shape
--- `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
--- `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
--- a different filter to each input channel (expanding from 1 channel to
--- `channel_multiplier` channels for each), then concatenates the results
--- together. Thus, the output has `in_channels * channel_multiplier` channels.
--- 
--- for k in 0..in_channels-1
---   for q in 0..channel_multiplier-1
---     output[b, i, j, k * channel_multiplier + q] =
---       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
---                         filter[di, dj, k, q]
--- 
--- Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
--- horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
-depthwiseConv2dNative :: forall v1 v2 t . (TensorType t, OneOf '[Double,
-                                                                 Float] t) =>
-                         Tensor v1 t -- ^ __input__
-                         -> Tensor v2 t -- ^ __filter__
-                         -> Tensor Value t -- ^ __output__
-depthwiseConv2dNative input filter | eqLengthGuard [] =
-    buildOp (opDef "DepthwiseConv2dNative"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input filter
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`."
-  name: "strides"
-  type: "list(int)"
-}
-attr {
-  allowed_values { list { s: "SAME" s: "VALID" } }
-  description: "The type of padding algorithm to use."
-  name: "padding"
-  type: "string"
-}
-input_arg { name: "input" type_attr: "T" }
-input_arg { name: "filter" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
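--- A worked shape example (a sketch, not from the generated docs): for an
--- input of shape [1, 8, 8, 3] and a filter of shape [3, 3, 3, 2], each of
--- the 3 input channels expands to channel_multiplier = 2 outputs, so the
--- result has 3 * 2 = 6 channels, i.e. shape [1, out_h, out_w, 6].
-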
--- | Generates labels for candidate sampling with a learned unigram distribution.
---
--- See explanations of candidate sampling and the data formats at
--- go/candidate-sampling.
--- 
--- For each batch, this op picks a single set of sampled candidate labels.
--- 
--- The advantages of sampling candidates per-batch are simplicity and the
--- possibility of efficient dense matrix multiplication. The disadvantage is that
--- the sampled candidates must be chosen independently of the context and of the
--- true labels.
-learnedUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch.
-                                  -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context.
-                                  -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max).
-                                  -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled
-                                          -- candidates in a batch are unique. This requires some approximation to
-                                          -- estimate the post-rejection sampling probabilities.
-                                  -> Tensor v1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the
-                                                              -- IDs of the num_true target_classes in the corresponding original label.
-                                  -> (Tensor Value Data.Int.Int64,
-                                      Tensor Value Float, Tensor Value Float)
-                                  -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
-                                  --
-                                  -- * __sampled_candidates__: A vector of length num_sampled, in which each element is
-                                  -- the ID of a sampled candidate.
-                                  --
-                                  -- * __true_expected_count__: A batch_size * num_true matrix, representing
-                                  -- the number of times each candidate is expected to occur in a batch
-                                  -- of sampled candidates. If unique=true, then this is a probability.
-                                  --
-                                  -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled
-                                  -- candidate representing the number of times the candidate is expected
-                                  -- to occur in a batch of sampled candidates.  If unique=true, then this is a
-                                  -- probability.
-learnedUnigramCandidateSampler num_sampled num_true range_max unique
-                               true_classes | eqLengthGuard [] =
-    buildOp (opDef "LearnedUnigramCandidateSampler"
-             & opAttr "num_sampled" .~ num_sampled
-             & opAttr "num_true" .~ num_true
-             & opAttr "range_max" .~ range_max
-             & opAttr "unique" .~ unique)
-        true_classes
-{-
-attr {
-  description: "Number of true labels per context."
-  has_minimum: true
-  minimum: 1
-  name: "num_true"
-  type: "int"
-}
-attr {
-  description: "Number of candidates to randomly sample per batch."
-  has_minimum: true
-  minimum: 1
-  name: "num_sampled"
-  type: "int"
-}
-attr {
-  description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
-  name: "unique"
-  type: "bool"
-}
-attr {
-  description: "The sampler will sample integers from the interval [0, range_max)."
-  has_minimum: true
-  minimum: 1
-  name: "range_max"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
-  name: "true_classes"
-  type: DT_INT64
-}
-output_arg {
-  description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
-  name: "sampled_candidates"
-  type: DT_INT64
-}
-output_arg {
-  description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
-  name: "true_expected_count"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
-  name: "sampled_expected_count"
-  type: DT_FLOAT
-}
--}
-
--- | Table initializer that takes two tensors for keys and values respectively.
-
-initializeTable :: forall v2 v3 tkey tval . (TensorType tkey,
-                                             TensorType tval) =>
-                   Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized.
-                   -> Tensor v2 tkey -- ^ __keys__: Keys of type Tkey.
-                   -> Tensor v3 tval -- ^ __values__: Values of type Tval.
-                   -> Build (ControlNode)
-initializeTable table_handle keys values | eqLengthGuard [] =
-    buildOp (opDef "InitializeTable"
-             & opAttr "Tkey" .~ tensorType (undefined :: tkey)
-             & opAttr "Tval" .~ tensorType (undefined :: tval))
-        table_handle keys values
-{-
-attr { name: "Tkey" type: "type" }
-attr { name: "Tval" type: "type" }
-input_arg {
-  description: "Handle to a table which will be initialized."
-  is_ref: true
-  name: "table_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Keys of type Tkey." name: "keys" type_attr: "Tkey"
-}
-input_arg {
-  description: "Values of type Tval."
-  name: "values"
-  type_attr: "Tval"
-}
--}
-
--- | Forwards the value of an available tensor from `inputs` to `output`.
---
--- `Merge` waits for at least one of the tensors in `inputs` to become available.
--- It is usually combined with `Switch` to implement branching.
--- 
--- `Merge` forwards the first tensor to become available to `output`, and sets
--- `value_index` to its index in `inputs`.
-merge :: forall v1 t . (TensorType t) =>
-         [Tensor v1 t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
-         -> (Tensor Value t, Tensor Value Data.Int.Int32)
-         -- ^ (__output__, __value_index__)
-         --
-         -- * __output__: Will be set to the available input tensor.
-         --
-         -- * __value_index__: The index of the chosen input tensor in `inputs`.
-merge inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
-    buildOp (opDef "Merge"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        inputs
-  where
-    n = fromIntegral (length inputs) :: Int64
-{-
-attr { name: "T" type: "type" }
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-input_arg {
-  description: "The input tensors, exactly one of which will become available."
-  name: "inputs"
-  number_attr: "N"
-  type_attr: "T"
-}
-output_arg {
-  description: "Will be set to the available input tensor."
-  name: "output"
-  type_attr: "T"
-}
-output_arg {
-  description: "The index of the chosen input tensor in `inputs`."
-  name: "value_index"
-  type: DT_INT32
-}
--}
-
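--- A branching sketch combining `Switch` (defined earlier in this module)
--- with `Merge`: route `x` to one of two ports, then merge whichever side
--- fired back into a single output. A real graph would transform each branch
--- before merging; this just shows the wiring.
---
--- > condIdentity :: TensorType t => Tensor v1 t -> Tensor v2 Bool -> Tensor Value t
--- > condIdentity x p = fst (merge [outFalse, outTrue])
--- >   where (outFalse, outTrue) = switch x p
-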
--- | Forwards the value of an available tensor from `inputs` to `output`.
---
--- `Merge` waits for at least one of the tensors in `inputs` to become available.
--- It is usually combined with `Switch` to implement branching.
--- 
--- `Merge` forwards the first tensor to become available to `output`, and sets
--- `value_index` to its index in `inputs`.
-refMerge :: forall t . (TensorType t) =>
-            [Tensor Ref t] -- ^ __inputs__: The input tensors, exactly one of which will become available.
-            -> Build ((Tensor Ref t, Tensor Value Data.Int.Int32))
-            -- ^ (__output__, __value_index__)
-            --
-            -- * __output__: Will be set to the available input tensor.
-            --
-            -- * __value_index__: The index of the chosen input tensor in `inputs`.
-refMerge inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
-    buildOp (opDef "RefMerge"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        inputs
-  where
-    n = fromIntegral (length inputs) :: Int64
-{-
-attr { name: "T" type: "type" }
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-input_arg {
-  description: "The input tensors, exactly one of which will become available."
-  is_ref: true
-  name: "inputs"
-  number_attr: "N"
-  type_attr: "T"
-}
-output_arg {
-  description: "Will be set to the available input tensor."
-  is_ref: true
-  name: "output"
-  type_attr: "T"
-}
-output_arg {
-  description: "The index of the chosen input tensor in `inputs`."
-  name: "value_index"
-  type: DT_INT32
-}
--}
-
--- | Rounds the values of a tensor to the nearest integer, element-wise.
---
--- Rounds half to even, also known as banker's rounding. If you want to round
--- according to the current system rounding mode, use std::rint.
-round :: forall v1 t . (TensorType t, OneOf '[(Data.Complex.Complex Double),
-                                              (Data.Complex.Complex Float),
-                                              Data.Int.Int32, Data.Int.Int64,
-                                              Data.Word.Word16, Double,
-                                              Float] t) =>
-         Tensor v1 t -- ^ __x__
-         -> Tensor Value t -- ^ __y__
-round x | eqLengthGuard [] =
-    buildOp (opDef "Round"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-output_arg { name: "y" type_attr: "T" }
--}
-
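--- Worked examples (a sketch, not from the generated docs): half-to-even
--- gives round 0.5 == 0, round 1.5 == 2, round 2.5 == 2, round 3.5 == 4,
--- whereas rounding half away from zero would give 1 and 3 for 0.5 and 2.5.
-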
--- | 
-
-batchSelfAdjointEig :: forall v1 t . (TensorType t, OneOf '[Double, Float] t) =>
-                       Tensor v1 t -- ^ __input__
-                       -> Tensor Value t -- ^ __output__
-batchSelfAdjointEig input | eqLengthGuard [] =
-    buildOp (opDef "BatchSelfAdjointEig"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr {
-  allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "input" type_attr: "T" }
-output_arg { name: "output" type_attr: "T" }
--}
-
--- | Partitions `data` into `num_partitions` tensors using indices from `partitions`.
---
--- For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
--- becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
--- are placed in `outputs[i]` in lexicographic order of `js`, and the first
--- dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
--- In detail,
--- 
--- ```python
---     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
--- 
---     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
--- ```
--- 
--- `data.shape` must start with `partitions.shape`.
--- 
--- For example:
--- 
--- ```python
---     # Scalar partitions.
---     partitions = 1
---     num_partitions = 2
---     data = [10, 20]
---     outputs[0] = []  # Empty with shape [0, 2]
---     outputs[1] = [[10, 20]]
--- 
---     # Vector partitions.
---     partitions = [0, 0, 1, 1, 0]
---     num_partitions = 2
---     data = [10, 20, 30, 40, 50]
---     outputs[0] = [10, 20, 50]
---     outputs[1] = [30, 40]
--- ```
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/DynamicPartition.png" alt>
--- </div>
-dynamicPartition :: forall v1 v2 t . (TensorType t) =>
-                    Data.Int.Int64 -- ^ __num_partitions__: The number of partitions to output.
-                    -> Tensor v1 t -- ^ __data__
-                    -> Tensor v2 Data.Int.Int32 -- ^ __partitions__: Any shape.  Indices in the range `[0, num_partitions)`.
-                    -> [Tensor Value t] -- ^ __outputs__
-dynamicPartition num_partitions data' partitions | eqLengthGuard [] =
-    buildListOp [num_partitions] (opDef "DynamicPartition"
-                                  & opAttr "T" .~ tensorType (undefined :: t)
-                                  & opAttr "num_partitions" .~ num_partitions)
-        data' partitions
-{-
-attr {
-  description: "The number of partitions to output."
-  has_minimum: true
-  minimum: 1
-  name: "num_partitions"
-  type: "int"
-}
-attr { name: "T" type: "type" }
-input_arg { name: "data" type_attr: "T" }
-input_arg {
-  description: "Any shape.  Indices in the range `[0, num_partitions)`."
-  name: "partitions"
-  type: DT_INT32
-}
-output_arg {
-  name: "outputs" number_attr: "num_partitions" type_attr: "T"
-}
--}
-
--- | Reshapes a tensor.
---
--- Given `tensor`, this operation returns a tensor that has the same values
--- as `tensor` with shape `shape`.
--- 
--- If one component of `shape` is the special value -1, the size of that dimension
--- is computed so that the total size remains constant.  In particular, a `shape`
--- of `[-1]` flattens into 1-D.  At most one component of `shape` can be -1.
--- 
--- If `shape` is 1-D or higher, then the operation returns a tensor with shape
--- `shape` filled with the values of `tensor`. In this case, the number of elements
--- implied by `shape` must be the same as the number of elements in `tensor`.
--- 
--- For example:
--- 
--- ```prettyprint
--- # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
--- # tensor 't' has shape [9]
--- reshape(t, [3, 3]) ==> [[1, 2, 3],
---                         [4, 5, 6],
---                         [7, 8, 9]]
--- 
--- # tensor 't' is [[[1, 1], [2, 2]],
--- #                [[3, 3], [4, 4]]]
--- # tensor 't' has shape [2, 2, 2]
--- reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
---                         [3, 3, 4, 4]]
--- 
--- # tensor 't' is [[[1, 1, 1],
--- #                 [2, 2, 2]],
--- #                [[3, 3, 3],
--- #                 [4, 4, 4]],
--- #                [[5, 5, 5],
--- #                 [6, 6, 6]]]
--- # tensor 't' has shape [3, 2, 3]
--- # pass '[-1]' to flatten 't'
--- reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
--- 
--- # -1 can also be used to infer the shape
--- 
--- # -1 is inferred to be 9:
--- reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
---                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
--- # -1 is inferred to be 2:
--- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
---                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
--- # -1 is inferred to be 3:
--- reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
---                               [2, 2, 2],
---                               [3, 3, 3]],
---                              [[4, 4, 4],
---                               [5, 5, 5],
---                               [6, 6, 6]]]
--- 
--- # tensor 't' is [7]
--- # shape `[]` reshapes to a scalar
--- reshape(t, []) ==> 7
--- ```
-reshape :: forall v1 v2 t tshape . (TensorType t, TensorType tshape,
-                                    OneOf '[Data.Int.Int32,
-                                            Data.Int.Int64] tshape) =>
-           Tensor v1 t -- ^ __tensor__
-           -> Tensor v2 tshape -- ^ __shape__: Defines the shape of the output tensor.
-           -> Tensor Value t -- ^ __output__
-reshape tensor shape | eqLengthGuard [] =
-    buildOp (opDef "Reshape"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tshape" .~ tensorType (undefined :: tshape))
-        tensor shape
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tshape"
-  type: "type"
-}
-input_arg { name: "tensor" type_attr: "T" }
-input_arg {
-  description: "Defines the shape of the output tensor."
-  name: "shape"
-  type_attr: "Tshape"
-}
-output_arg { name: "output" type_attr: "T" }
--}
-
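--- A hypothetical call sketch mirroring the examples above, assuming
--- `vector` from TensorFlow.Ops is in scope; the -1 component is inferred
--- (here as 3) so that the element count is preserved:
---
--- > reshapeDemo :: Tensor Value Float
--- > reshapeDemo = reshape (vector [1..9 :: Float]) (vector [3, -1 :: Data.Int.Int32])
-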
--- | A Reader that outputs fixed-length records from a file.
-
-fixedLengthRecordReader :: Data.Int.Int64 -- ^ __record_bytes__
-                           -> Build (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader.
-fixedLengthRecordReader record_bytes | eqLengthGuard [] =
-    buildOp (opDef "FixedLengthRecordReader"
-             & opAttr "record_bytes" .~ record_bytes)
-        
-{-
-attr { default_value { i: 0 } name: "header_bytes" type: "int" }
-attr { name: "record_bytes" type: "int" }
-attr { default_value { i: 0 } name: "footer_bytes" type: "int" }
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to reference the Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
--}
-
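--- A worked framing example (a sketch): the binding above only exposes
--- record_bytes, and the header_bytes and footer_bytes attrs shown above
--- default to 0, so with record_bytes = 8 a 40-byte file yields
--- 40 / 8 = 5 records.
---
--- > readerDemo :: Build (Tensor Ref Data.ByteString.ByteString)
--- > readerDemo = fixedLengthRecordReader 8
-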
--- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
---
--- linear models with L1 + L2 regularization. Because the global optimization
--- objective is strongly convex, the optimizer optimizes the dual objective at
--- each step. The optimizer applies each update one example at a time. Examples
--- are sampled uniformly, and the optimizer is learning-rate free and enjoys a
--- linear convergence rate.
--- 
--- Proximal Stochastic Dual Coordinate Ascent. Shalev-Shwartz, Shai; Zhang, Tong.
--- 2012. arXiv:1211.2717, http://arxiv.org/pdf/1211.2717v1.pdf
--- 
---   Loss objective = \\(\sum_i f_i(w x_i) + (l2 / 2) |w|^2 + l1 |w|\\)
--- 
--- Adding vs. Averaging in Distributed Primal-Dual Optimization.
--- Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik,
--- Martin Takac http://arxiv.org/abs/1502.03508
--- 
--- Stochastic Dual Coordinate Ascent with Adaptive Probabilities
--- Dominik Csiba, Zheng Qu, Peter Richtarik https://arxiv.org/abs/1502.08053
-sdcaOptimizer :: Float -- ^ __l1__: Symmetric l1 regularization strength.
-                 -> Float -- ^ __l2__: Symmetric l2 regularization strength.
-                 -> Data.Int.Int64 -- ^ __num_inner_iterations__: Number of iterations per mini-batch.
-                 -> Data.Int.Int64 -- ^ __num_loss_partitions__: Number of partitions of the global loss function.
-                 -> [Tensor v1 Data.Int.Int64] -- ^ __sparse_example_indices__: a list of vectors which contain example indices.
-                 -> [Tensor v2 Data.Int.Int64] -- ^ __sparse_feature_indices__: a list of vectors which contain feature indices.
-                 -> [Tensor v3 Float] -- ^ __sparse_feature_values__: a list of vectors which contains feature value
-                                      -- associated with each feature group.
-                 -> [Tensor v4 Float] -- ^ __dense_features__: a list of matrices which contains the dense feature values.
-                 -> Tensor v5 Float -- ^ __example_weights__: a vector which contains the weight associated with each
-                                    -- example.
-                 -> Tensor v6 Float -- ^ __example_labels__: a vector which contains the label/target associated with each
-                                    -- example.
-                 -> [Tensor v7 Data.Int.Int64] -- ^ __sparse_indices__: a list of vectors where each value is the indices which has
-                                               -- corresponding weights in sparse_weights. This field may be omitted for the
-                                               -- dense approach.
-                 -> [Tensor v8 Float] -- ^ __sparse_weights__: a list of vectors where each value is the weight associated with
-                                      -- a sparse feature group.
-                 -> [Tensor v9 Float] -- ^ __dense_weights__: a list of vectors where the values are the weights associated
-                                      -- with a dense feature group.
-                 -> Tensor v10 Float -- ^ __example_state_data__: a list of vectors containing the example state data.
-                 -> (Tensor Value Float, [Tensor Value Float],
-                     [Tensor Value Float])
-                 -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__)
-                 --
-                 -- * __out_example_state_data__: a list of vectors containing the updated example state
-                 -- data.
-                 --
-                 -- * __out_delta_sparse_weights__: a list of vectors where each value is the delta
-                 -- weights associated with a sparse feature group.
-                 --
-                 -- * __out_delta_dense_weights__: a list of vectors where the values are the delta
-                 -- weights associated with a dense feature group.
-sdcaOptimizer l1 l2 num_inner_iterations num_loss_partitions
-              sparse_example_indices sparse_feature_indices
-              sparse_feature_values dense_features example_weights
-              example_labels sparse_indices sparse_weights dense_weights
-              example_state_data | eqLengthGuard [("num_sparse_features", [("sparse_example_indices", length sparse_example_indices),
-                                                                           ("sparse_feature_indices", length sparse_feature_indices),
-                                                                           ("sparse_indices", length sparse_indices),
-                                                                           ("sparse_weights", length sparse_weights)]),
-                                                  ("num_sparse_features_with_values", [("sparse_feature_values", length sparse_feature_values)]),
-                                                  ("num_dense_features", [("dense_features", length dense_features),
-                                                                          ("dense_weights", length dense_weights)])] =
-    buildListOp [num_sparse_features, num_dense_features] (opDef "SdcaOptimizer"
-                                                           & opAttr "l1" .~ l1
-                                                           & opAttr "l2" .~ l2
-                                                           & opAttr "num_inner_iterations" .~ num_inner_iterations
-                                                           & opAttr "num_loss_partitions" .~ num_loss_partitions
-                                                           & opAttr "num_sparse_features" .~ num_sparse_features
-                                                           & opAttr "num_sparse_features_with_values" .~ num_sparse_features_with_values
-                                                           & opAttr "num_dense_features" .~ num_dense_features)
-        sparse_example_indices sparse_feature_indices sparse_feature_values
-        dense_features example_weights example_labels sparse_indices
-        sparse_weights dense_weights example_state_data
-  where
-    num_sparse_features = fromIntegral (length sparse_example_indices) :: Int64
-    num_sparse_features_with_values = fromIntegral (length sparse_feature_values) :: Int64
-    num_dense_features = fromIntegral (length dense_features) :: Int64
-{-
-attr {
-  allowed_values {
-    list {
-      s: "logistic_loss"
-      s: "squared_loss"
-      s: "hinge_loss"
-      s: "smooth_hinge_loss"
-    }
-  }
-  description: "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses."
-  name: "loss_type"
-  type: "string"
-}
-attr {
-  default_value { b: false }
-  description: "Whether to use Adapative SDCA for the inner loop."
-  name: "adaptative"
-  type: "bool"
-}
-attr {
-  description: "Number of sparse feature groups to train on."
-  has_minimum: true
-  name: "num_sparse_features"
-  type: "int"
-}
-attr {
-  description: "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0."
-  has_minimum: true
-  name: "num_sparse_features_with_values"
-  type: "int"
-}
-attr {
-  description: "Number of dense feature groups to train on."
-  has_minimum: true
-  name: "num_dense_features"
-  type: "int"
-}
-attr {
-  description: "Symmetric l1 regularization strength."
-  name: "l1"
-  type: "float"
-}
-attr {
-  description: "Symmetric l2 regularization strength."
-  name: "l2"
-  type: "float"
-}
-attr {
-  description: "Number of partitions of the global loss function."
-  has_minimum: true
-  minimum: 1
-  name: "num_loss_partitions"
-  type: "int"
-}
-attr {
-  description: "Number of iterations per mini-batch."
-  has_minimum: true
-  minimum: 1
-  name: "num_inner_iterations"
-  type: "int"
-}
-input_arg {
-  description: "a list of vectors which contain example indices."
-  name: "sparse_example_indices"
-  number_attr: "num_sparse_features"
-  type: DT_INT64
-}
-input_arg {
-  description: "a list of vectors which contain feature indices."
-  name: "sparse_feature_indices"
-  number_attr: "num_sparse_features"
-  type: DT_INT64
-}
-input_arg {
-  description: "a list of vectors which contains feature value\nassociated with each feature group."
-  name: "sparse_feature_values"
-  number_attr: "num_sparse_features_with_values"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "a list of matrices which contains the dense feature values."
-  name: "dense_features"
-  number_attr: "num_dense_features"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "a vector which contains the weight associated with each\nexample."
-  name: "example_weights"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "a vector which contains the label/target associated with each\nexample."
-  name: "example_labels"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. This field maybe ommitted for the\ndense approach."
-  name: "sparse_indices"
-  number_attr: "num_sparse_features"
-  type: DT_INT64
-}
-input_arg {
-  description: "a list of vectors where each value is the weight associated with\na sparse feature group."
-  name: "sparse_weights"
-  number_attr: "num_sparse_features"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "a list of vectors where the values are the weights associated\nwith a dense feature group."
-  name: "dense_weights"
-  number_attr: "num_dense_features"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "a list of vectors containing the example state data."
-  name: "example_state_data"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "a list of vectors containing the updated example state\ndata."
-  name: "out_example_state_data"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "a list of vectors where each value is the delta\nweights associated with a sparse feature group."
-  name: "out_delta_sparse_weights"
-  number_attr: "num_sparse_features"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "a list of vectors where the values are the delta\nweights associated with a dense feature group."
-  name: "out_delta_dense_weights"
-  number_attr: "num_dense_features"
-  type: DT_FLOAT
-}
--}
-
--- | Resize `images` to `size` using area interpolation.
---
--- Input images can be of different types but output images are always float.
-resizeArea :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                      Data.Int.Int32,
-                                                      Data.Int.Int64,
-                                                      Data.Int.Int8,
-                                                      Data.Word.Word16,
-                                                      Data.Word.Word8, Double,
-                                                      Float] t) =>
-              Tensor v1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`.
-              -> Tensor v2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
-                                          -- new size for the images.
-              -> Tensor Value Float -- ^ __resized_images__: 4-D with shape
-              -- `[batch, new_height, new_width, channels]`.
-resizeArea images size | eqLengthGuard [] =
-    buildOp (opDef "ResizeArea"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        images size
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension."
-  name: "align_corners"
-  type: "bool"
-}
-input_arg {
-  description: "4-D with shape `[batch, height, width, channels]`."
-  name: "images"
-  type_attr: "T"
-}
-input_arg {
-  description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The\nnew size for the images."
-  name: "size"
-  type: DT_INT32
-}
-output_arg {
-  description: "4-D with shape\n`[batch, new_height, new_width, channels]`."
-  name: "resized_images"
-  type: DT_FLOAT
-}
--}
-
--- | Generates values in an interval.
---
--- A sequence of `num` evenly-spaced values are generated beginning at `start`.
--- If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
--- so that the last one is exactly `stop`.
--- 
--- For example:
--- 
--- ```
--- tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
--- ```
-linSpace :: forall v1 v2 v3 t tidx . (TensorType t, OneOf '[Double, Float] t,
-                                      TensorType tidx, OneOf '[Data.Int.Int32,
-                                                               Data.Int.Int64] tidx) =>
-            Tensor v1 t -- ^ __start__: First entry in the range.
-            -> Tensor v2 t -- ^ __stop__: Last entry in the range.
-            -> Tensor v3 tidx -- ^ __num__: Number of values to generate.
-            -> Tensor Value t -- ^ __output__: 1-D. The generated values.
-linSpace start stop num | eqLengthGuard [] =
-    buildOp (opDef "LinSpace"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        start stop num
-{-
-attr {
-  allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "First entry in the range."
-  name: "start"
-  type_attr: "T"
-}
-input_arg {
-  description: "Last entry in the range." name: "stop" type_attr: "T"
-}
-input_arg {
-  description: "Number of values to generate."
-  name: "num"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "1-D. The generated values."
-  name: "output"
-  type_attr: "T"
-}
--}
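-
--- As a plain-Haskell sketch of the spacing rule above (`linspaceRef` is an
--- illustrative helper, not one of the generated wrappers):
--- 
--- ```
--- -- Evenly spaced values from start to stop, inclusive.
--- linspaceRef :: Float -> Float -> Int -> [Float]
--- linspaceRef start stop num
---   | num <= 1  = [start]
---   | otherwise = [start + fromIntegral i * step | i <- [0 .. num - 1]]
---   where
---     step = (stop - start) / fromIntegral (num - 1)
--- ```
--- 
--- For example, `linspaceRef 10 12 3` yields `[10.0, 11.0, 12.0]`, matching
--- the `tf.linspace` example above.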
-
--- | Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
---
--- the gradient.  This op performs the softmax operation for you, so inputs
--- should be e.g. linear projections of outputs of an LSTM.
-cTCLoss :: Tensor v1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-           -> Tensor v2 Data.Int.Int64 -- ^ __labels_indices__: The indices of a `SparseTensor<int32, 2>`.
-                                       -- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
-                                       -- `(batch b, time t)`.
-           -> Tensor v3 Data.Int.Int32 -- ^ __labels_values__: The values (labels) associated with the given batch and time.
-           -> Tensor v4 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths (batch).
-           -> (Tensor Value Float, Tensor Value Float)
-           -- ^ (__loss__, __gradient__)
-           --
-           -- * __loss__: A vector (batch) containing log-probabilities.
-           --
-           -- * __gradient__: The gradient of `loss`.  3-D, shape:
-           -- `(max_time x batch_size x num_classes)`.
-cTCLoss inputs labels_indices labels_values sequence_length | eqLengthGuard [] =
-    buildOp (opDef "CTCLoss")
-        inputs labels_indices labels_values sequence_length
-{-
-attr {
-  default_value { b: false }
-  description: "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation."
-  name: "preprocess_collapse_repeated"
-  type: "bool"
-}
-attr {
-  default_value { b: true }
-  description: "Scalar.  If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels.  This is a simplified version of CTC."
-  name: "ctc_merge_repeated"
-  type: "bool"
-}
-input_arg {
-  description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
-  name: "inputs"
-  type: DT_FLOAT
-}
-input_arg {
-  description: "The indices of a `SparseTensor<int32, 2>`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`."
-  name: "labels_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "The values (labels) associated with the given batch and time."
-  name: "labels_values"
-  type: DT_INT32
-}
-input_arg {
-  description: "A vector containing sequence lengths (batch)."
-  name: "sequence_length"
-  type: DT_INT32
-}
-output_arg {
-  description: "A vector (batch) containing log-probabilities."
-  name: "loss"
-  type: DT_FLOAT
-}
-output_arg {
-  description: "The gradient of `loss`.  3-D, shape:\n`(max_time x batch_size x num_classes)`."
-  name: "gradient"
-  type: DT_FLOAT
-}
--}
-
--- | Returns the batched diagonal part of a batched tensor.
---
--- This operation returns a tensor with the `diagonal` part
--- of the batched `input`. The `diagonal` part is computed as follows:
--- 
--- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
--- tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
--- 
--- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
--- 
--- The input must be at least a matrix.
--- 
--- For example:
--- 
--- ```prettyprint
--- # 'input' is [[[1, 0, 0, 0]
---                [0, 2, 0, 0]
---                [0, 0, 3, 0]
---                [0, 0, 0, 4]],
---               [[5, 0, 0, 0]
---                [0, 6, 0, 0]
---                [0, 0, 7, 0]
---                [0, 0, 0, 8]]]
--- 
--- and input.shape = (2, 4, 4)
--- 
--- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
--- 
--- which has shape (2, 4)
--- ```
-matrixDiagPart :: forall v1 t . (TensorType t) =>
-                  Tensor v1 t -- ^ __input__: Rank `k` tensor where `k >= 2`.
-                  -> Tensor Value t -- ^ __diagonal__: The extracted diagonal(s) having shape
-                  -- `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
-matrixDiagPart input | eqLengthGuard [] =
-    buildOp (opDef "MatrixDiagPart"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "Rank `k` tensor where `k >= 2`."
-  name: "input"
-  type_attr: "T"
-}
-output_arg {
-  description: "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`."
-  name: "diagonal"
-  type_attr: "T"
-}
--}
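-
--- The diagonal rule above, restated for a single matrix in plain Haskell
--- (`diagPartRef` is an illustrative helper, not a generated wrapper):
--- 
--- ```
--- -- Take input[n, n] for n in [0, min (rows, cols)).
--- diagPartRef :: [[a]] -> [a]
--- diagPartRef rows = [row !! n | (n, row) <- zip [0 ..] rows, n < length row]
--- ```
--- 
--- Applied to each inner matrix of the example above, this yields
--- `[1, 2, 3, 4]` and `[5, 6, 7, 8]`.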
-
--- | Creates or finds a child frame, and makes `data` available to the child frame.
---
--- This op is used together with `Exit` to create loops in the graph.
--- The unique `frame_name` is used by the `Executor` to identify frames. If
--- `is_constant` is true, `output` is a constant in the child frame; otherwise
--- it may be changed in the child frame. At most `parallel_iterations` iterations
--- are run in parallel in the child frame.
-enter :: forall v1 t . (TensorType t) =>
-         Tensor v1 t -- ^ __data__: The tensor to be made available to the child frame.
-         -> Tensor Value t -- ^ __output__: The same tensor as `data`.
-enter data' | eqLengthGuard [] =
-    buildOp (opDef "Enter"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data'
-{-
-attr { name: "T" type: "type" }
-attr {
-  description: "The name of the child frame."
-  name: "frame_name"
-  type: "string"
-}
-attr {
-  default_value { b: false }
-  description: "If true, the output is constant within the child frame."
-  name: "is_constant"
-  type: "bool"
-}
-attr {
-  default_value { i: 10 }
-  description: "The number of iterations allowed to run in parallel."
-  name: "parallel_iterations"
-  type: "int"
-}
-input_arg {
-  description: "The tensor to be made available to the child frame."
-  name: "data"
-  type_attr: "T"
-}
-output_arg {
-  description: "The same tensor as `data`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | PNG-encode an image.
---
--- `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
--- where `channels` is:
--- 
--- *   1: for grayscale.
--- *   2: for grayscale + alpha.
--- *   3: for RGB.
--- *   4: for RGBA.
--- 
--- The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
--- default or a value from 0 to 9.  9 is the highest compression level, generating
--- the smallest output, but is slower.
-encodePng :: forall v1 t . (TensorType t, OneOf '[Data.Word.Word16,
-                                                  Data.Word.Word8] t) =>
-             Tensor v1 t -- ^ __image__: 3-D with shape `[height, width, channels]`.
-             -> Tensor Value Data.ByteString.ByteString -- ^ __contents__: 0-D. PNG-encoded image.
-encodePng image | eqLengthGuard [] =
-    buildOp (opDef "EncodePng"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        image
-{-
-attr {
-  default_value { i: -1 }
-  description: "Compression level."
-  name: "compression"
-  type: "int"
-}
-attr {
-  allowed_values { list { type: DT_UINT8 type: DT_UINT16 } }
-  default_value { type: DT_UINT8 }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "3-D with shape `[height, width, channels]`."
-  name: "image"
-  type_attr: "T"
-}
-output_arg {
-  description: "0-D. PNG-encoded image."
-  name: "contents"
-  type: DT_STRING
-}
--}
-
--- | Exits the current frame to its parent frame.
---
--- Exit makes its input `data` available to the parent frame.
-exit :: forall v1 t . (TensorType t) =>
-        Tensor v1 t -- ^ __data__: The tensor to be made available to the parent frame.
-        -> Tensor Value t -- ^ __output__: The same tensor as `data`.
-exit data' | eqLengthGuard [] =
-    buildOp (opDef "Exit"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data'
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The tensor to be made available to the parent frame."
-  name: "data"
-  type_attr: "T"
-}
-output_arg {
-  description: "The same tensor as `data`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Creates a new tensor by applying sparse `updates` to individual
---
--- values or slices within a zero tensor of the given `shape` tensor according to
--- indices.  This operator is the inverse of the [tf.gather_nd](#gather_nd)
--- operator which extracts values or slices from a given tensor.
--- 
--- TODO(simister): Add a link to Variable.__getitem__ documentation on slice
--- syntax.
--- 
--- `shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank
--- `Q`.
--- 
--- `indices` must be an integer tensor, containing indices into `shape`.
--- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
--- 
--- The innermost dimension of `indices` (with length `K`) corresponds to
--- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
--- dimension of `shape`.
--- 
--- `updates` is a Tensor of rank `Q-1+P-K` with shape:
--- 
--- ```
--- [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
--- ```
--- 
--- The simplest form of scatter is to insert individual elements in a tensor by
--- index. For example, say we want to insert 4 scattered elements in a rank-1
--- tensor with 8 elements.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/ScatterNd1.png" alt>
--- </div>
--- 
--- In Python, this scatter operation would look like this:
--- 
---     indices = tf.constant([[4], [3], [1], [7]])
---     updates = tf.constant([9, 10, 11, 12])
---     shape = tf.constant([8])
---     scatter = tf.scatter_nd(indices, updates, shape)
---     with tf.Session() as sess:
---       print sess.run(scatter)
--- 
--- The resulting tensor would look like this:
--- 
---     [0, 11, 0, 10, 9, 0, 0, 12]
--- 
--- We can also insert entire slices of a higher rank tensor all at once. For
--- example, suppose we want to insert two slices in the first dimension of a
--- rank-3 tensor with two matrices of new values.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/ScatterNd2.png" alt>
--- </div>
--- 
--- In Python, this scatter operation would look like this:
--- 
---     indices = tf.constant([[0], [2]])
---     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
---                             [7, 7, 7, 7], [8, 8, 8, 8]],
---                            [[5, 5, 5, 5], [6, 6, 6, 6],
---                             [7, 7, 7, 7], [8, 8, 8, 8]]])
---     shape = tf.constant([4, 4, 4])
---     scatter = tf.scatter_nd(indices, updates, shape)
---     with tf.Session() as sess:
---       print sess.run(scatter)
--- 
--- The resulting tensor would look like this:
--- 
---     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
---      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
---      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
---      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
-scatterNd :: forall v1 v2 v3 t tindices . (TensorType t, TensorType tindices,
-                                           OneOf '[Data.Int.Int32,
-                                                   Data.Int.Int64] tindices) =>
-             Tensor v1 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64.
-                                -- A tensor of indices into ref.
-             -> Tensor v2 t -- ^ __updates__: A Tensor. Must have the same type as tensor. A tensor of updated values
-                            -- to store in ref.
-             -> Tensor v3 tindices -- ^ __shape__: A vector. The shape of the resulting tensor.
-             -> Tensor Value t -- ^ __output__: A new tensor with the given shape and updates applied according
-             -- to the indices.
-scatterNd indices updates shape | eqLengthGuard [] =
-    buildOp (opDef "ScatterNd"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        indices updates shape
-{-
-attr { name: "T" type: "type" }
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-input_arg {
-  description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A Tensor. Must have the same type as tensor. A tensor of updated values\nto store in ref."
-  name: "updates"
-  type_attr: "T"
-}
-input_arg {
-  description: "A vector. The shape of the resulting tensor."
-  name: "shape"
-  type_attr: "Tindices"
-}
-output_arg {
-  description: "A new tensor with the given shape and updates applied according\nto the indices."
-  name: "output"
-  type_attr: "T"
-}
--}
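-
--- The rank-1 case of the scatter semantics above, as a plain-Haskell sketch
--- (`scatterNd1D` and its Data.Map representation are illustrative, not part
--- of the generated bindings):
--- 
--- ```
--- import qualified Data.Map.Strict as Map
--- 
--- -- Scatter updates into a zero vector of the given length;
--- -- duplicate indices have their contributions summed.
--- scatterNd1D :: Num a => Int -> [(Int, a)] -> [a]
--- scatterNd1D len updates =
---     [Map.findWithDefault 0 i table | i <- [0 .. len - 1]]
---   where
---     table = Map.fromListWith (+) updates
--- ```
--- 
--- `scatterNd1D 8 [(4, 9), (3, 10), (1, 11), (7, 12)]` reproduces the first
--- example above: `[0, 11, 0, 10, 9, 0, 0, 12]`.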
-
--- | A queue that produces elements sorted by the first component value.
---
--- Note that the PriorityQueue requires the first component of any element
--- to be a scalar int64, in addition to the other elements declared by
--- component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
--- and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
--- entry in their input (resp. output) lists.
-priorityQueue :: Build (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue.
-priorityQueue  | eqLengthGuard [] =
-    buildOp (opDef "PriorityQueue")
-        
-{-
-attr {
-  default_value { list { } }
-  description: "The type of each component in a value."
-  has_minimum: true
-  name: "component_types"
-  type: "list(type)"
-}
-attr {
-  description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time."
-  has_minimum: true
-  name: "shapes"
-  type: "list(shape)"
-}
-attr {
-  default_value { i: -1 }
-  description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit."
-  name: "capacity"
-  type: "int"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used."
-  name: "container"
-  type: "string"
-}
-attr {
-  default_value { s: "" }
-  description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions."
-  name: "shared_name"
-  type: "string"
-}
-output_arg {
-  description: "The handle to the queue."
-  is_ref: true
-  name: "handle"
-  type: DT_STRING
-}
--}
-
--- | Forwards the ref tensor `data` to the output port determined by `pred`.
---
--- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
--- the data goes to `output_false`.
--- 
--- See also `Switch` and `Merge`.
-refSwitch :: forall v2 t . (TensorType t) =>
-             Tensor Ref t -- ^ __data__: The ref tensor to be forwarded to the appropriate output.
-             -> Tensor v2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data.
-             -> Build ((Tensor Ref t, Tensor Ref t))
-             -- ^ (__output_false__, __output_true__)
-             --
-             -- * __output_false__: If `pred` is false, data will be forwarded to this output.
-             --
-             -- * __output_true__: If `pred` is true, data will be forwarded to this output.
-refSwitch data' pred | eqLengthGuard [] =
-    buildOp (opDef "RefSwitch"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data' pred
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The ref tensor to be forwarded to the appropriate output."
-  is_ref: true
-  name: "data"
-  type_attr: "T"
-}
-input_arg {
-  description: "A scalar that specifies which output port will receive data."
-  name: "pred"
-  type: DT_BOOL
-}
-output_arg {
-  description: "If `pred` is false, data will be forwarded to this output."
-  is_ref: true
-  name: "output_false"
-  type_attr: "T"
-}
-output_arg {
-  description: "If `pred` is true, data will be forwarded to this output."
-  is_ref: true
-  name: "output_true"
-  type_attr: "T"
-}
--}
-
--- | Makes its input available to the next iteration.
-
-nextIteration :: forall v1 t . (TensorType t) =>
-                 Tensor v1 t -- ^ __data__: The tensor to be made available to the next iteration.
-                 -> Tensor Value t -- ^ __output__: The same tensor as `data`.
-nextIteration data' | eqLengthGuard [] =
-    buildOp (opDef "NextIteration"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data'
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The tensor to be made available to the next iteration."
-  name: "data"
-  type_attr: "T"
-}
-output_arg {
-  description: "The same tensor as `data`."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Makes its input available to the next iteration.
-
-refNextIteration :: forall t . (TensorType t) =>
-                    Tensor Ref t -- ^ __data__: The tensor to be made available to the next iteration.
-                    -> Build (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
-refNextIteration data' | eqLengthGuard [] =
-    buildOp (opDef "RefNextIteration"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data'
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The tensor to be made available to the next iteration."
-  is_ref: true
-  name: "data"
-  type_attr: "T"
-}
-output_arg {
-  description: "The same tensor as `data`."
-  is_ref: true
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Multiplies slices of two tensors in batches.
---
--- Multiplies all slices of `Tensor` `x` and `y` (each slice can be
--- viewed as an element of a batch), and arranges the individual results
--- in a single output tensor of the same batch size. Each of the
--- individual slices can optionally be adjointed (to adjoint a matrix
--- means to transpose and conjugate it) before multiplication by setting
--- the `adj_x` or `adj_y` flag to `True`; both default to `False`.
--- 
--- The input tensors `x` and `y` are 3-D or higher with shape `[..., r_x, c_x]`
--- and `[..., r_y, c_y]`.
--- 
--- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where:
--- 
---     r_o = c_x if adj_x else r_x
---     c_o = r_y if adj_y else c_y
--- 
--- It is computed as:
--- 
---     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
-batchMatMul :: forall v1 v2 t . (TensorType t,
-                                 OneOf '[(Data.Complex.Complex Double),
-                                         (Data.Complex.Complex Float),
-                                         Data.Int.Int32, Data.Word.Word16,
-                                         Double, Float] t) =>
-               Tensor v1 t -- ^ __x__: 3-D or higher with shape `[..., r_x, c_x]`.
-               -> Tensor v2 t -- ^ __y__: 3-D or higher with shape `[..., r_y, c_y]`.
-               -> Tensor Value t -- ^ __output__: 3-D or higher with shape `[..., r_o, c_o]`
-batchMatMul x y | eqLengthGuard [] =
-    buildOp (opDef "BatchMatMul"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, adjoint the slices of `x`. Defaults to `False`."
-  name: "adj_x"
-  type: "bool"
-}
-attr {
-  default_value { b: false }
-  description: "If `True`, adjoint the slices of `y`. Defaults to `False`."
-  name: "adj_y"
-  type: "bool"
-}
-input_arg {
-  description: "3-D or higher with shape `[..., r_x, c_x]`."
-  name: "x"
-  type_attr: "T"
-}
-input_arg {
-  description: "3-D or higher with shape `[..., r_y, c_y]`."
-  name: "y"
-  type_attr: "T"
-}
-output_arg {
-  description: "3-D or higher with shape `[..., r_o, c_o]`"
-  name: "output"
-  type_attr: "T"
-}
--}
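-
--- The output-shape rule above as a small sketch (`sliceShape` is an
--- illustrative helper, not a generated wrapper):
--- 
--- ```
--- -- Shape of each output slice from the input slice shapes and the
--- -- adj_x / adj_y flags: r_o = c_x if adj_x else r_x, and likewise c_o.
--- sliceShape :: Bool -> Bool -> (Int, Int) -> (Int, Int) -> (Int, Int)
--- sliceShape adjX adjY (rX, cX) (rY, cY) =
---     (if adjX then cX else rX, if adjY then rY else cY)
--- ```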
-
--- | Forwards the `index`th element of `inputs` to `output`.
-
-refSelect :: forall v1 t . (TensorType t) =>
-             Tensor v1 Data.Int.Int32 -- ^ __index__: A scalar that determines the input that gets selected.
-             -> [Tensor Ref t] -- ^ __inputs__: A list of ref tensors, one of which will be forwarded to `output`.
-             -> Build (Tensor Ref t) -- ^ __output__: The forwarded tensor.
-refSelect index inputs | eqLengthGuard [("N", [("inputs", length inputs)])] =
-    buildOp (opDef "RefSelect"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "N" .~ n)
-        index inputs
-  where
-    n = fromIntegral (length inputs) :: Int64
-{-
-attr { name: "T" type: "type" }
-attr { has_minimum: true minimum: 1 name: "N" type: "int" }
-input_arg {
-  description: "A scalar that determines the input that gets selected."
-  name: "index"
-  type: DT_INT32
-}
-input_arg {
-  description: "A list of ref tensors, one of which will be forwarded to `output`."
-  is_ref: true
-  name: "inputs"
-  number_attr: "N"
-  type_attr: "T"
-}
-output_arg {
-  description: "The forwarded tensor."
-  is_ref: true
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Computes the mean of elements across dimensions of a tensor.
---
--- Reduces `input` along the dimensions given in `reduction_indices`. Unless
--- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
--- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
--- retained with length 1.
-mean :: forall v1 v2 t tidx . (TensorType t,
-                               OneOf '[(Data.Complex.Complex Double),
-                                       (Data.Complex.Complex Float),
-                                       Data.Int.Int16, Data.Int.Int32,
-                                       Data.Int.Int64, Data.Int.Int8,
-                                       Data.Word.Word16, Data.Word.Word8,
-                                       Double, Float] t, TensorType tidx,
-                               OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) =>
-        Tensor v1 t -- ^ __input__: The tensor to reduce.
-        -> Tensor v2 tidx -- ^ __reduction_indices__: The dimensions to reduce.
-        -> Tensor Value t -- ^ __output__: The reduced tensor.
-mean input reduction_indices | eqLengthGuard [] =
-    buildOp (opDef "Mean"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tidx" .~ tensorType (undefined :: tidx))
-        input reduction_indices
-{-
-attr {
-  default_value { b: false }
-  description: "If true, retain reduced dimensions with length 1."
-  name: "keep_dims"
-  type: "bool"
-}
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  default_value { type: DT_INT32 }
-  name: "Tidx"
-  type: "type"
-}
-input_arg {
-  description: "The tensor to reduce." name: "input" type_attr: "T"
-}
-input_arg {
-  description: "The dimensions to reduce."
-  name: "reduction_indices"
-  type_attr: "Tidx"
-}
-output_arg {
-  description: "The reduced tensor." name: "output" type_attr: "T"
-}
--}
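-
--- A plain-Haskell sketch of how reducing one dimension lowers the rank
--- (`meanLastAxis` is an illustrative helper, not a generated wrapper):
--- 
--- ```
--- -- Reducing the innermost axis of a rank-2 value drops that axis
--- -- (the keep_dims = false behavior); rows are assumed non-empty.
--- meanLastAxis :: Fractional a => [[a]] -> [a]
--- meanLastAxis = map rowMean
---   where
---     rowMean xs = sum xs / fromIntegral (length xs)
--- ```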
-
--- | Adds sparse updates to a variable reference.
---
--- This operation computes
--- 
---     # Scalar indices
---     ref[indices, ...] += updates[...]
--- 
---     # Vector indices (for each i)
---     ref[indices[i], ...] += updates[i, ...]
--- 
---     # High rank indices (for each i, ..., j)
---     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
--- 
--- This operation outputs `ref` after the update is done.
--- This makes it easier to chain operations that need to use the reset value.
--- 
--- Duplicate entries are handled correctly: if multiple `indices` reference
--- the same location, their contributions add.
--- 
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
--- 
--- <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
--- <img style="width:100%" src="../../images/ScatterAdd.png" alt>
--- </div>
-scatterAdd :: forall v2 v3 t tindices . (TensorType t,
-                                         OneOf '[(Data.Complex.Complex Double),
-                                                 (Data.Complex.Complex Float),
-                                                 Data.Int.Int16, Data.Int.Int32,
-                                                 Data.Int.Int64, Data.Int.Int8,
-                                                 Data.Word.Word16,
-                                                 Data.Word.Word8, Double,
-                                                 Float] t, TensorType tindices,
-                                         OneOf '[Data.Int.Int32,
-                                                 Data.Int.Int64] tindices) =>
-              Tensor Ref t -- ^ __ref__: Should be from a `Variable` node.
-              -> Tensor v2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`.
-              -> Tensor v3 t -- ^ __updates__: A tensor of updated values to add to `ref`.
-              -> Build (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`.  Returned as a convenience for operations that want
-              -- to use the updated values after the update is done.
-scatterAdd ref indices updates | eqLengthGuard [] =
-    buildOp (opDef "ScatterAdd"
-             & opAttr "T" .~ tensorType (undefined :: t)
-             & opAttr "Tindices" .~ tensorType (undefined :: tindices))
-        ref indices updates
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "Tindices"
-  type: "type"
-}
-attr {
-  default_value { b: false }
-  description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
-  name: "use_locking"
-  type: "bool"
-}
-input_arg {
-  description: "Should be from a `Variable` node."
-  is_ref: true
-  name: "ref"
-  type_attr: "T"
-}
-input_arg {
-  description: "A tensor of indices into the first dimension of `ref`."
-  name: "indices"
-  type_attr: "Tindices"
-}
-input_arg {
-  description: "A tensor of updated values to add to `ref`."
-  name: "updates"
-  type_attr: "T"
-}
-output_arg {
-  description: "= Same as `ref`.  Returned as a convenience for operations that want\nto use the updated values after the update is done."
-  is_ref: true
-  name: "output_ref"
-  type_attr: "T"
-}
--}
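-
--- The duplicate-index behavior above in a plain-Haskell sketch
--- (`scatterAddRef` is an illustrative helper, not a generated wrapper):
--- 
--- ```
--- import qualified Data.Map.Strict as Map
--- 
--- -- ref[i] += sum of all updates targeting index i; entries that
--- -- reference the same location have their contributions added.
--- scatterAddRef :: Num a => [a] -> [(Int, a)] -> [a]
--- scatterAddRef ref updates =
---     [v + Map.findWithDefault 0 i acc | (i, v) <- zip [0 ..] ref]
---   where
---     acc = Map.fromListWith (+) updates
--- ```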
-
--- | Randomly crop `image`.
---
--- `size` is a 1-D int64 tensor with 2 elements representing the crop height and
--- width.  The values must be non-negative.
--- 
--- This Op picks a random location in `image` and crops a `height` by `width`
--- rectangle from that location.  The random location is picked so the cropped
--- area will fit inside the original image.
-randomCrop :: forall v1 v2 t . (TensorType t, OneOf '[Data.Int.Int16,
-                                                      Data.Int.Int32,
-                                                      Data.Int.Int64,
-                                                      Data.Int.Int8,
-                                                      Data.Word.Word8, Double,
-                                                      Float] t) =>
-              Tensor v1 t -- ^ __image__: 3-D of shape `[height, width, channels]`.
-              -> Tensor v2 Data.Int.Int64 -- ^ __size__: 1-D of length 2 containing: `crop_height`, `crop_width`.
-              -> Build (Tensor Value t) -- ^ __output__: 3-D of shape `[crop_height, crop_width, channels]`.
-randomCrop image size | eqLengthGuard [] =
-    buildOp (opDef "RandomCrop"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        image size
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_UINT8
-      type: DT_INT8
-      type: DT_INT16
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_FLOAT
-      type: DT_DOUBLE
-    }
-  }
-  name: "T"
-  type: "type"
-}
-attr {
-  default_value { i: 0 }
-  description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
-  name: "seed"
-  type: "int"
-}
-attr {
-  default_value { i: 0 }
-  description: "An second seed to avoid seed collision."
-  name: "seed2"
-  type: "int"
-}
-input_arg {
-  description: "3-D of shape `[height, width, channels]`."
-  name: "image"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D of length 2 containing: `crop_height`, `crop_width`.."
-  name: "size"
-  type: DT_INT64
-}
-output_arg {
-  description: "3-D of shape `[crop_height, crop_width, channels].`"
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Exits the current frame to its parent frame.
---
--- Exit makes its input `data` available to the parent frame.
-refExit :: forall t . (TensorType t) =>
-           Tensor Ref t -- ^ __data__: The tensor to be made available to the parent frame.
-           -> Build (Tensor Ref t) -- ^ __output__: The same tensor as `data`.
-refExit data' | eqLengthGuard [] =
-    buildOp (opDef "RefExit"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        data'
-{-
-attr { name: "T" type: "type" }
-input_arg {
-  description: "The tensor to be made available to the parent frame."
-  is_ref: true
-  name: "data"
-  type_attr: "T"
-}
-output_arg {
-  description: "The same tensor as `data`."
-  is_ref: true
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Produce a string tensor that encodes the state of a Reader.
---
--- Not all Readers support being serialized, so this can produce an
--- Unimplemented error.
-readerSerializeState :: Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader.
-                        -> Build (Tensor Value Data.ByteString.ByteString) -- ^ __state__
-readerSerializeState reader_handle | eqLengthGuard [] =
-    buildOp (opDef "ReaderSerializeState")
-        reader_handle
-{-
-input_arg {
-  description: "Handle to a Reader."
-  is_ref: true
-  name: "reader_handle"
-  type: DT_STRING
-}
-output_arg { name: "state" type: DT_STRING }
--}
-
--- | Computes the gradient for the tanh of `x` wrt its input.
---
--- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
--- is the corresponding input gradient.
-tanhGrad :: forall v1 v2 t . (TensorType t,
-                              OneOf '[(Data.Complex.Complex Double),
-                                      (Data.Complex.Complex Float),
-                                      Data.Word.Word16, Double, Float] t) =>
-            Tensor v1 t -- ^ __x__
-            -> Tensor v2 t -- ^ __y__
-            -> Tensor Value t -- ^ __z__
-tanhGrad x y | eqLengthGuard [] =
-    buildOp (opDef "TanhGrad"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        x y
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_HALF
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg { name: "x" type_attr: "T" }
-input_arg { name: "y" type_attr: "T" }
-output_arg { name: "z" type_attr: "T" }
--}
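-
--- The gradient formula above is simple enough to restate directly
--- (`tanhGradRef` is an illustrative helper, not a generated wrapper):
--- 
--- ```
--- -- grad = dy * (1 - y * y), where y = tanh x was computed upstream.
--- tanhGradRef :: Num a => a -> a -> a
--- tanhGradRef y dy = dy * (1 - y * y)
--- ```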
-
--- | Returns the element-wise max of two SparseTensors.
---
--- Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
-sparseSparseMaximum :: forall v1 v2 v3 v4 v5 v6 t . (TensorType t,
-                                                     OneOf '[Data.Int.Int16,
-                                                             Data.Int.Int32,
-                                                             Data.Int.Int64,
-                                                             Data.Int.Int8,
-                                                             Data.Word.Word16,
-                                                             Data.Word.Word8,
-                                                             Double,
-                                                             Float] t) =>
-                       Tensor v1 Data.Int.Int64 -- ^ __a_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                                -- SparseTensor, in the canonical lexicographic ordering.
-                       -> Tensor v2 t -- ^ __a_values__: 1-D.  `N` non-empty values corresponding to `a_indices`.
-                       -> Tensor v3 Data.Int.Int64 -- ^ __a_shape__: 1-D.  Shape of the input SparseTensor.
-                       -> Tensor v4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand.
-                       -> Tensor v5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype.
-                       -> Tensor v6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal.
-                       -> (Tensor Value Data.Int.Int64, Tensor Value t)
-                       -- ^ (__output_indices__, __output_values__)
-                       --
-                       -- * __output_indices__: 2-D.  The indices of the output SparseTensor.
-                       --
-                       -- * __output_values__: 1-D.  The values of the output SparseTensor.
-sparseSparseMaximum a_indices a_values a_shape b_indices b_values
-                    b_shape | eqLengthGuard [] =
-    buildOp (opDef "SparseSparseMaximum"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        a_indices a_values a_shape b_indices b_values b_shape
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT32
-      type: DT_INT64
-      type: DT_UINT8
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_UINT16
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering."
-  name: "a_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `a_indices`."
-  name: "a_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "a_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "counterpart to `a_indices` for the other operand."
-  name: "b_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "counterpart to `a_values` for the other operand; must be of the same dtype."
-  name: "b_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "counterpart to `a_shape` for the other operand; the two shapes must be equal."
-  name: "b_shape"
-  type: DT_INT64
-}
-output_arg {
-  description: "2-D.  The indices of the output SparseTensor."
-  name: "output_indices"
-  type: DT_INT64
-}
-output_arg {
-  description: "1-D.  The values of the output SparseTensor."
-  name: "output_values"
-  type_attr: "T"
-}
--}
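-
--- A plain-Haskell sketch of the element-wise maximum over sparse operands,
--- treating absent entries as implicit zeros (`sparseMaxRef` and the Map
--- representation are illustrative, not part of the generated bindings):
--- 
--- ```
--- import qualified Data.Map.Strict as Map
--- import qualified Data.Set as Set
--- 
--- -- The output carries the union of both index sets.
--- sparseMaxRef :: (Ord k, Ord a, Num a)
---              => Map.Map k a -> Map.Map k a -> Map.Map k a
--- sparseMaxRef a b =
---     Map.fromSet at (Set.union (Map.keysSet a) (Map.keysSet b))
---   where
---     at k = max (Map.findWithDefault 0 k a) (Map.findWithDefault 0 k b)
--- ```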
-
--- | Decode the first frame of a GIF-encoded image to a uint8 tensor.
---
--- GIFs with frame or transparency compression are not supported;
--- convert animated GIFs from compressed to uncompressed with:
--- 
--- convert $src.gif -coalesce $dst.gif
-decodeGif :: Tensor v1 Data.ByteString.ByteString -- ^ __contents__: 0-D.  The GIF-encoded image.
-             -> Tensor Value Data.Word.Word8 -- ^ __image__: 4-D with shape `[num_frames, height, width, 3]`. RGB order
-decodeGif contents | eqLengthGuard [] =
-    buildOp (opDef "DecodeGif")
-        contents
-{-
-input_arg {
-  description: "0-D.  The GIF-encoded image."
-  name: "contents"
-  type: DT_STRING
-}
-output_arg {
-  description: "4-D with shape `[num_frames, height, width, 3]`. RGB order"
-  name: "image"
-  type: DT_UINT8
-}
--}
-
--- | Return substrings from `Tensor` of strings.
---
--- For each string in the input `Tensor`, creates a substring starting at index
--- `pos` with a total length of `len`.
--- 
--- If `len` defines a substring that would extend beyond the length of the input
--- string, then as many characters as possible are used.
--- 
--- If `pos` is negative or specifies a character index larger than any of the input
--- strings, then an `InvalidArgumentError` is thrown.
--- 
--- `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
--- Op creation.
--- 
--- *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
--- broadcasting
--- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
--- 
--- ---
--- 
--- Examples
--- 
--- Using scalar `pos` and `len`:
--- 
--- ```
--- input = [b'Hello', b'World']
--- position = 1
--- length = 3
--- 
--- output = [b'ell', b'orl']
--- ```
--- 
--- Using `pos` and `len` with same shape as `input`:
--- 
--- ```
--- input = [[b'ten', b'eleven', b'twelve'],
---          [b'thirteen', b'fourteen', b'fifteen'],
---          [b'sixteen', b'seventeen', b'eighteen']]
--- position = [[1, 2, 3],
---             [1, 2, 3],
---             [1, 2, 3]]
--- length =   [[2, 3, 4],
---             [4, 3, 2],
---             [5, 5, 5]]
--- 
--- output = [[b'en', b'eve', b'lve'],
---           [b'hirt', b'urt', b'te'],
---           [b'ixtee', b'vente', b'hteen']]
--- ```
--- 
--- Broadcasting `pos` and `len` onto `input`:
--- 
--- ```
--- input = [[b'ten', b'eleven', b'twelve'],
---          [b'thirteen', b'fourteen', b'fifteen'],
---          [b'sixteen', b'seventeen', b'eighteen'],
---          [b'nineteen', b'twenty', b'twentyone']]
--- position = [1, 2, 3]
--- length =   [1, 2, 3]
--- 
--- output = [[b'e', b'ev', b'lve'],
---           [b'h', b'ur', b'tee'],
---           [b'i', b've', b'hte'],
---           [b'i', b'en', b'nty']]
--- ```
--- 
--- Broadcasting `input` onto `pos` and `len`:
--- 
--- ```
--- input = b'thirteen'
--- position = [1, 5, 7]
--- length =   [3, 2, 1]
--- 
--- output = [b'hir', b'ee', b'n']
--- ```
-substr :: forall v1 v2 v3 t . (TensorType t, OneOf '[Data.Int.Int32,
-                                                     Data.Int.Int64] t) =>
-          Tensor v1 Data.ByteString.ByteString -- ^ __input__: Tensor of strings
-          -> Tensor v2 t -- ^ __pos__: Scalar defining the position of first character in each substring
-          -> Tensor v3 t -- ^ __len__: Scalar defining the number of characters to include in each substring
-          -> Tensor Value Data.ByteString.ByteString -- ^ __output__: Tensor of substrings
-substr input pos len | eqLengthGuard [] =
-    buildOp (opDef "Substr"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        input pos len
-{-
-attr {
-  allowed_values { list { type: DT_INT32 type: DT_INT64 } }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "Tensor of strings" name: "input" type: DT_STRING
-}
-input_arg {
-  description: "Scalar defining the position of first character in each substring"
-  name: "pos"
-  type_attr: "T"
-}
-input_arg {
-  description: "Scalar defining the number of characters to include in each substring"
-  name: "len"
-  type_attr: "T"
-}
-output_arg {
-  description: "Tensor of substrings" name: "output" type: DT_STRING
-}
--}
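-
--- The per-string rule above in plain Haskell (`substrRef` is an illustrative
--- helper, not a generated wrapper):
--- 
--- ```
--- -- take clamps len to the characters available; note that unlike the
--- -- op, this sketch also silently clamps an out-of-range pos instead
--- -- of raising InvalidArgumentError.
--- substrRef :: Int -> Int -> String -> String
--- substrRef pos len = take len . drop pos
--- ```
--- 
--- For example, `substrRef 1 3 "Hello"` is `"ell"`, as in the scalar example
--- above.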
-
--- | Updates the table to associate keys with values.
---
--- The tensor `keys` must be of the same type as the keys of the table.
--- The tensor `values` must be of the type of the table values.
-lookupTableInsert :: forall v2 v3 tin tout . (TensorType tin,
-                                              TensorType tout) =>
-                     Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
-                     -> Tensor v2 tin -- ^ __keys__: Any shape.  Keys to look up.
-                     -> Tensor v3 tout -- ^ __values__: Values to associate with keys.
-                     -> Build (ControlNode)
-lookupTableInsert table_handle keys values | eqLengthGuard [] =
-    buildOp (opDef "LookupTableInsert"
-             & opAttr "Tin" .~ tensorType (undefined :: tin)
-             & opAttr "Tout" .~ tensorType (undefined :: tout))
-        table_handle keys values
-{-
-attr { name: "Tin" type: "type" }
-attr { name: "Tout" type: "type" }
-input_arg {
-  description: "Handle to the table."
-  is_ref: true
-  name: "table_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Any shape.  Keys to look up."
-  name: "keys"
-  type_attr: "Tin"
-}
-input_arg {
-  description: "Values to associate with keys."
-  name: "values"
-  type_attr: "Tout"
-}
--}
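-
--- A Data.Map analogy for the update semantics above, assuming that insert
--- overwrites existing keys (`insertAll` is an illustrative helper, not a
--- generated wrapper):
--- 
--- ```
--- import qualified Data.Map.Strict as Map
--- 
--- -- A left-biased union: the new pairs win over prior entries.
--- insertAll :: Ord k => [(k, v)] -> Map.Map k v -> Map.Map k v
--- insertAll kvs table = Map.union (Map.fromList kvs) table
--- ```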
-
--- | Component-wise divides a SparseTensor by a dense Tensor.
---
--- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
--- the other direction.
-sparseDenseCwiseDiv :: forall v1 v2 v3 v4 t . (TensorType t,
-                                               OneOf '[(Data.Complex.Complex Double),
-                                                       (Data.Complex.Complex Float),
-                                                       Data.Int.Int16,
-                                                       Data.Int.Int32,
-                                                       Data.Int.Int64,
-                                                       Data.Int.Int8,
-                                                       Data.Word.Word16,
-                                                       Data.Word.Word8, Double,
-                                                       Float] t) =>
-                       Tensor v1 Data.Int.Int64 -- ^ __sp_indices__: 2-D.  `N x R` matrix with the indices of non-empty values in a
-                                                -- SparseTensor, possibly not in canonical ordering.
-                       -> Tensor v2 t -- ^ __sp_values__: 1-D.  `N` non-empty values corresponding to `sp_indices`.
-                       -> Tensor v3 Data.Int.Int64 -- ^ __sp_shape__: 1-D.  Shape of the input SparseTensor.
-                       -> Tensor v4 t -- ^ __dense__: `R`-D.  The dense Tensor operand.
-                       -> Tensor Value t -- ^ __output__: 1-D.  The `N` values that are operated on.
-sparseDenseCwiseDiv sp_indices sp_values sp_shape dense | eqLengthGuard [] =
-    buildOp (opDef "SparseDenseCwiseDiv"
-             & opAttr "T" .~ tensorType (undefined :: t))
-        sp_indices sp_values sp_shape dense
-{-
-attr {
-  allowed_values {
-    list {
-      type: DT_FLOAT
-      type: DT_DOUBLE
-      type: DT_INT64
-      type: DT_INT32
-      type: DT_UINT8
-      type: DT_UINT16
-      type: DT_INT16
-      type: DT_INT8
-      type: DT_COMPLEX64
-      type: DT_COMPLEX128
-      type: DT_QINT8
-      type: DT_QUINT8
-      type: DT_QINT32
-      type: DT_HALF
-    }
-  }
-  name: "T"
-  type: "type"
-}
-input_arg {
-  description: "2-D.  `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering."
-  name: "sp_indices"
-  type: DT_INT64
-}
-input_arg {
-  description: "1-D.  `N` non-empty values corresponding to `sp_indices`."
-  name: "sp_values"
-  type_attr: "T"
-}
-input_arg {
-  description: "1-D.  Shape of the input SparseTensor."
-  name: "sp_shape"
-  type: DT_INT64
-}
-input_arg {
-  description: "`R`-D.  The dense Tensor operand."
-  name: "dense"
-  type_attr: "T"
-}
-output_arg {
-  description: "1-D.  The `N` values that are operated on."
-  name: "output"
-  type_attr: "T"
-}
--}
-
--- | Replaces the contents of the table with the specified keys and values.
---
--- The tensor `keys` must be of the same type as the keys of the table.
--- The tensor `values` must be of the type of the table values.
-lookupTableImport :: forall v2 v3 tin tout . (TensorType tin,
-                                              TensorType tout) =>
-                     Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table.
-                     -> Tensor v2 tin -- ^ __keys__: Any shape.  Keys to look up.
-                     -> Tensor v3 tout -- ^ __values__: Values to associate with keys.
-                     -> Build (ControlNode)
-lookupTableImport table_handle keys values | eqLengthGuard [] =
-    buildOp (opDef "LookupTableImport"
-             & opAttr "Tin" .~ tensorType (undefined :: tin)
-             & opAttr "Tout" .~ tensorType (undefined :: tout))
-        table_handle keys values
-{-
-attr { name: "Tin" type: "type" }
-attr { name: "Tout" type: "type" }
-input_arg {
-  description: "Handle to the table."
-  is_ref: true
-  name: "table_handle"
-  type: DT_STRING
-}
-input_arg {
-  description: "Any shape.  Keys to look up."
-  name: "keys"
-  type_attr: "Tin"
-}
-input_arg {
-  description: "Values to associate with keys."
-  name: "values"
-  type_attr: "Tout"
-}
--}
-
- diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-core-ops-0.1.0.0/src/hscolour.css deleted file mode 100644 index c15919e..0000000 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/src/hscolour.css +++ /dev/null @@ -1,5 +0,0 @@ -.hs-keyglyph, .hs-layout {color: red;} -.hs-keyword {color: blue;} -.hs-comment, .hs-comment a {color: green;} -.hs-str, .hs-chr {color: teal;} -.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt b/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt index eeaa297..3645dcd 100644 --- a/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt +++ b/docs/haddock/tensorflow-core-ops-0.1.0.0/tensorflow-core-ops.txt @@ -4,53 +4,1901 @@ -- | Haskell wrappers for Core Tensorflow Ops. -- --- Code generated signatures for the Ops in libtensorflow_c. +-- Code generated signatures for the Ops in libtensorflow. @package tensorflow-core-ops @version 0.1.0.0 module TensorFlow.GenOps.Core --- | Receives the named tensor from send_device on recv_device. +-- | Raise a exception to abort the process when called. If +-- exit_without_error is true, the process will exit normally, otherwise +-- it will exit with a SIGABORT signal. -- --- _HostRecv requires its input on host memory whereas _Recv requires its --- input on device memory. -_HostRecv :: (TensorType tensor_type) => Int64 -> Build (Tensor Value tensor_type) +-- Returns nothing but an exception. +abort :: (MonadBuild m') => m' (ControlNode) +abort' :: (MonadBuild m') => OpParams -> m' (ControlNode) --- | Sends the named tensor from send_device to recv_device. +-- | Computes the absolute value of a tensor. -- --- _HostSend requires its input on host memory whereas _Send requires its --- input on device memory. -_HostSend :: (TensorType t) => Int64 -> Tensor v1 t -> Build (ControlNode) +-- Given a tensor x, this operation returns a tensor containing +-- the absolute value of each element in x. For example, if x is +-- an input element and y is an output element, this operation computes +-- \(y = |x|\). +abs :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +abs' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Receives the named tensor from send_device on recv_device. -_Recv :: (TensorType tensor_type) => Int64 -> Build (Tensor Value tensor_type) - --- | Sends the named tensor from send_device to recv_device. -_Send :: (TensorType t) => Int64 -> Tensor v1 t -> Build (ControlNode) - --- | Does nothing. Only useful as a placeholder for control edges. -noOp :: ControlNode - --- | A graph node which represents a return value of a function. -_Retval :: (TensorType t) => Int64 -> Tensor v1 t -> Build (ControlNode) - --- | A graph node which represents an argument to a function. -_Arg :: (TensorType t) => Int64 -> Build (Tensor Value t) - --- | Quantized Batch normalization. +-- | Applies a gradient to a given accumulator. Does not add if local_step +-- is lesser -- --- This op is deprecated and will be removed in the future. Prefer --- `tf.nn.batch_normalization`. 
-quantizedBatchNormWithGlobalNormalization :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Bool -> Float -> Tensor v1 tinput -> Tensor v2 Float -> Tensor v3 Float -> Tensor v4 tinput -> Tensor v5 Float -> Tensor v6 Float -> Tensor v7 tinput -> Tensor v8 Float -> Tensor v9 Float -> Tensor v10 tinput -> Tensor v11 Float -> Tensor v12 Float -> Tensor v13 tinput -> Tensor v14 Float -> Tensor v15 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float) +-- than the accumulator's global_step. +accumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode) +accumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 dtype -> m' (ControlNode) --- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` -quantizedRelu6 :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v1 tinput -> Tensor v2 Float -> Tensor v3 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float) +-- | Returns the number of gradients aggregated in the given accumulators. +accumulatorNumAccumulated :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) +accumulatorNumAccumulated' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) --- | Adds Tensor bias to Tensor input for Quantized --- types. +-- | Updates the accumulator with a new value for global_step. Logs warning +-- if the -- --- Broadcasts the values of bias on dimensions 0..N-2 of input. -quantizedBiasAdd :: (TensorType t1, OneOf '[Int16, Int32, Word16, Word8] t1, TensorType t2, OneOf '[Int16, Int32, Word16, Word8] t2, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor v3 Float -> Tensor v4 Float -> Tensor v5 Float -> Tensor v6 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float) +-- accumulator's value is already higher than new_global_step. +accumulatorSetGlobalStep :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode) +accumulatorSetGlobalStep' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int64 -> m' (ControlNode) + +-- | Extracts the average gradient in the given ConditionalAccumulator, +-- provided +-- +-- that sufficient (i.e., more than num_required) gradients have been +-- accumulated. The op blocks until sufficient gradients have been +-- accumulated. If the accumulator has already aggregated more than +-- num_required gradients, it returns the average of the accumulated +-- gradients. Also automatically increments the recorded global_step in +-- the accumulator by 1, and resets the aggregate to 0. 
+accumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype) +accumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (Tensor Value dtype) + +-- | Computes acos of x element-wise. +acos :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +acos' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns x + y element-wise. +-- +--
+-- *NOTE*: Add supports broadcasting. AddN does not. More about
+-- broadcasting here.
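+--
+-- As an editorial sketch (not generated code): running add in a
+-- session, assuming runSession and run from tensorflow's
+-- TensorFlow.Core and vector from tensorflow-ops' TensorFlow.Ops.
+--
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as CoreOps
+-- > import qualified TensorFlow.Ops as Ops
+-- >
+-- > main :: IO ()
+-- > main = do
+-- >   -- Build two constant vectors and fetch their element-wise sum.
+-- >   result <- TF.runSession $ do
+-- >     let x = Ops.vector [1, 2, 3 :: Float]
+-- >         y = Ops.vector [10, 20, 30]
+-- >     TF.run (CoreOps.add x y)
+-- >   print (result :: V.Vector Float)  -- [11.0,22.0,33.0]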
+add :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +add' :: (OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Add an N-minibatch SparseTensor to a +-- SparseTensorsMap, return N handles. +-- +-- A SparseTensor of rank R is represented by three +-- tensors: sparse_indices, sparse_values, and +-- sparse_shape, where +-- +-- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R``` +-- +-- An N-minibatch of SparseTensor objects is +-- represented as a SparseTensor having a first +-- sparse_indices column taking values between `[0, N)`, where +-- the minibatch size `N == sparse_shape[0]`. +-- +-- The input SparseTensor must have rank R greater than +-- 1, and the first dimension is treated as the minibatch dimension. +-- Elements of the SparseTensor must be sorted in increasing +-- order of this first dimension. The stored SparseTensor +-- objects pointed to by each row of the output sparse_handles +-- will have rank `R-1`. +-- +-- The SparseTensor values can then be read out as part of a +-- minibatch by passing the given keys as vector elements to +-- TakeManySparseFromTensorsMap. To ensure the correct +-- SparseTensorsMap is accessed, ensure that the same +-- container and shared_name are passed to that Op. If +-- no shared_name is provided here, instead use the *name* of +-- the Operation created by calling AddManySparseToTensorsMap as +-- the shared_name passed to +-- TakeManySparseFromTensorsMap. Ensure the Operations are +-- colocated. +addManySparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) +addManySparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) + +-- | Add all input tensors element wise. +addN :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => [Tensor v'1 t] -> Tensor Build t +addN' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> [Tensor v'1 t] -> Tensor Build t + +-- | Add a SparseTensor to a SparseTensorsMap return its +-- handle. +-- +-- A SparseTensor is represented by three tensors: +-- sparse_indices, sparse_values, and +-- sparse_shape. +-- +-- This operator takes the given SparseTensor and adds it to a +-- container object (a SparseTensorsMap). A unique key within +-- this container is generated in the form of an int64, and this +-- is the value that is returned. +-- +-- The SparseTensor can then be read out as part of a minibatch +-- by passing the key as a vector element to +-- TakeManySparseFromTensorsMap. To ensure the correct +-- SparseTensorsMap is accessed, ensure that the same +-- container and shared_name are passed to that Op. If +-- no shared_name is provided here, instead use the *name* of +-- the Operation created by calling AddSparseToTensorsMap as the +-- shared_name passed to TakeManySparseFromTensorsMap. +-- Ensure the Operations are colocated. 
+addSparseToTensorsMap :: (MonadBuild m', TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) +addSparseToTensorsMap' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> m' (Tensor Value Int64) + +-- | Deprecated. Disallowed in GraphDef version >= 2. +adjustContrast :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float +adjustContrast' :: (OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor Build Float + +-- | Adjust the contrast of one or more images. +-- +-- images is a tensor of at least 3 dimensions. The last 3 +-- dimensions are interpreted as `[height, width, channels]`. The other +-- dimensions only represent a collection of images, such as `[batch, +-- height, width, channels].` +-- +-- Contrast is adjusted independently for each channel of each image. +-- +-- For each channel, the Op first computes the mean of the image pixels +-- in the channel and then adjusts each component of each pixel to `(x - +-- mean) * contrast_factor + mean`. +adjustContrastv2 :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float +adjustContrastv2' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float + +-- | Adjust the hue of one or more images. +-- +-- images is a tensor of at least 3 dimensions. The last +-- dimension is interpretted as channels, and must be three. +-- +-- The input image is considered in the RGB colorspace. Conceptually, the +-- RGB colors are first mapped into HSV. A delta is then applied all the +-- hue values, and then remapped back to RGB colorspace. +adjustHue :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float +adjustHue' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float + +-- | Adjust the saturation of one or more images. +-- +-- images is a tensor of at least 3 dimensions. The last +-- dimension is interpretted as channels, and must be three. +-- +-- The input image is considered in the RGB colorspace. Conceptually, the +-- RGB colors are first mapped into HSV. A scale is then applied all the +-- saturation values, and then remapped back to RGB colorspace. +adjustSaturation :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float +adjustSaturation' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float + +-- | Computes the "logical and" of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +all :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool +all' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool + +-- | Generates labels for candidate sampling with a learned unigram +-- distribution. +-- +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. 
The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +allCandidateSampler :: Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) +allCandidateSampler' :: OpParams -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) + +-- | Computes the "logical or" of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +any :: (OneOf '[Int32, Int64] tidx) => Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool +any' :: (OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 Bool -> Tensor v'2 tidx -> Tensor Build Bool + +-- | Update '*var' according to the adadelta scheme. +-- +-- accum = rho() * accum + (1 - rho()) * grad.square(); update = +-- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; +-- update_accum = rho() * update_accum + (1 - rho()) * update.square(); +-- var -= update; +applyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t) +applyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (Tensor Ref t) + +-- | Update '*var' according to the adagrad scheme. +-- +-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) +applyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t) +applyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> m' (Tensor Ref t) + +-- | Update '*var' according to the proximal adagrad scheme. +applyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t) +applyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (Tensor Ref t) + +-- | Update '*var' according to the Adam algorithm. 
+-- +-- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- +-- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - +-- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + +-- epsilon) +applyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t) +applyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (Tensor Ref t) + +-- | Update '*var' according to the centered RMSProp algorithm. +-- +-- The centered RMSProp algorithm uses an estimate of the centered second +-- moment (i.e., the variance) for normalization, as opposed to regular +-- RMSProp, which uses the (uncentered) second moment. This often helps +-- with training, but is slightly more expensive in terms of computation +-- and memory. +-- +-- Note that in dense implementation of this algorithm, mg, ms, and mom +-- will update even if the grad is zero, but in this sparse +-- implementation, mg, ms, and mom will not update in iterations during +-- which the grad is zero. +-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 +-- mean_grad = decay * mean_grad + (1-decay) * gradient +-- +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - +-- mean_grad ** 2) +-- +-- mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + +-- (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / +-- sqrt(ms - mg * mg + epsilon) var <- var - mom +applyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) +applyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) + +-- | Update '*var' according to the Ftrl-proximal scheme. +-- +-- accum_new = accum + grad * grad linear += grad + +-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 +-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - +-- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new +applyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) +applyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) + +-- | Update '*var' by subtracting alpha * delta from it. 
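+--
+-- An editorial sketch of a single descent step, assuming
+-- initializedVariable, vector, and scalar from tensorflow-ops'
+-- TensorFlow.Ops.
+--
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as CoreOps
+-- > import qualified TensorFlow.Ops as Ops
+-- >
+-- > step :: TF.Session (V.Vector Float)
+-- > step = do
+-- >   -- var starts at [1, 2, 3]; the update subtracts alpha * delta.
+-- >   var <- Ops.initializedVariable (Ops.vector [1, 2, 3 :: Float])
+-- >   updated <- CoreOps.applyGradientDescent var (Ops.scalar 0.1)
+-- >                                               (Ops.vector [1, 1, 1])
+-- >   TF.run updated  -- [0.9,1.9,2.9]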
+applyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t) +applyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> m' (Tensor Ref t) + +-- | Update '*var' according to the momentum scheme. Set use_nesterov = +-- True if you +-- +-- want to use Nesterov momentum. +-- +-- accum = accum * momentum + grad var -= lr * accum +applyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) +applyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) + +-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning +-- rate. +-- +-- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var +-- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} +applyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t) +applyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (Tensor Ref t) + +-- | Update '*var' as FOBOS algorithm with fixed learning rate. +-- +-- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * +-- max{|prox_v|-alpha*l1,0} +applyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) +applyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (Tensor Ref t) + +-- | Update '*var' according to the RMSProp algorithm. +-- +-- Note that in dense implementation of this algorithm, ms and mom will +-- update even if the grad is zero, but in this sparse implementation, ms +-- and mom will not update in iterations during which the grad is zero. 
+-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +applyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) +applyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (Tensor Ref t) + +-- | Returns the index with the largest value across dimensions of a +-- tensor. +argMax :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 +argMax' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 + +-- | Returns the index with the smallest value across dimensions of a +-- tensor. +argMin :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 +argMin' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 + +-- | Converts each entry in the given tensor to strings. Supports many +-- numeric +-- +-- types and boolean. +asString :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => Tensor v'1 t -> Tensor Build ByteString +asString' :: (OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString + +-- | Computes asin of x element-wise. +asin :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +asin' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Asserts that the given condition is true. +-- +-- If condition evaluates to false, print the list of tensors in +-- `data`. summarize determines how many entries of the tensors +-- to print. +assert :: (MonadBuild m', TensorTypes t) => Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode) +assert' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Bool -> TensorList (v'2) t -> m' (ControlNode) + +-- | Update ref by assigning value to it. +-- +-- This operation outputs "ref" after the assignment is done. This makes +-- it easier to chain operations that need to use the reset value. +assign :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) +assign' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) + +-- | Update ref by adding value to it. +-- +-- This operation outputs "ref" after the update is done. This makes it +-- easier to chain operations that need to use the reset value. 
+assignAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) +assignAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) + +-- | Adds a value to the current value of a variable. +-- +-- Any ReadVariableOp which depends directly or indirectly on this assign +-- is guaranteed to see the incremented value or a subsequent newer one. +-- +-- Outputs the incremented value, which can be used to totally order the +-- increments to this variable. +assignAddVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) +assignAddVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) + +-- | Update ref by subtracting value from it. +-- +-- This operation outputs "ref" after the update is done. This makes it +-- easier to chain operations that need to use the reset value. +assignSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) +assignSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) + +-- | Assigns a new value to a variable. +-- +-- Any ReadVariableOp with a control dependency on this op is guaranteed +-- to return this value or a subsequent newer value of the variable. +assignVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) +assignVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 dtype -> m' (ControlNode) + +-- | Computes atan of x element-wise. +atan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +atan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Outputs a Summary protocol buffer with audio. +-- +-- The summary has up to max_outputs summary values containing +-- audio. The audio is built from tensor which must be 3-D with +-- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, +-- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` +-- with a sample rate of sample_rate. +-- +-- The tag argument is a scalar Tensor of type +-- string. It is used to build the tag of the summary +-- values: +-- +--
+-- * If max_outputs is 1, the summary value tag is '*tag*/audio'.
+--
+-- * If max_outputs is greater than 1, the summary value tags are
+--   generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
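+--
+-- An editorial sketch of building a summary for one second of silence
+-- at 16 kHz; it assumes scalar and zeros from tensorflow-ops'
+-- TensorFlow.Ops and Shape/Build from TensorFlow.Core.
+--
+-- > import qualified Data.ByteString.Char8 as B8
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as CoreOps
+-- > import qualified TensorFlow.Ops as Ops
+-- >
+-- > summary :: TF.Tensor TF.Build B8.ByteString
+-- > summary = CoreOps.audioSummary 16000  -- sample_rate attribute, in Hz
+-- >                                (Ops.scalar (B8.pack "waveform"))
+-- >                                (Ops.zeros (TF.Shape [1, 16000]))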
+audioSummary :: Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString +audioSummary' :: OpParams -> Float -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build ByteString + +-- | Outputs a Summary protocol buffer with audio. +-- +-- The summary has up to max_outputs summary values containing +-- audio. The audio is built from tensor which must be 3-D with +-- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, +-- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` +-- with a sample rate of sample_rate. +-- +-- The tag argument is a scalar Tensor of type +-- string. It is used to build the tag of the summary +-- values: +-- +--
+-- * If max_outputs is 1, the summary value tag is '*tag*/audio'.
+--
+-- * If max_outputs is greater than 1, the summary value tags are
+--   generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+audioSummaryV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString +audioSummaryV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build ByteString + +-- | Performs average pooling on the input. +-- +-- Each entry in output is the mean of the corresponding size +-- ksize window in value. +avgPool :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +avgPool' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Performs 3D average pooling on the input. +avgPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +avgPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes gradients of average pooling function. +avgPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t +avgPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t + +-- | Computes gradients of the average pooling function. +avgPoolGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t +avgPoolGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t + +-- | Defines a barrier that persists across different graph executions. +-- +-- A barrier represents a key-value map, where each key is a string, and +-- each value is a tuple of tensors. +-- +-- At runtime, the barrier contains complete and +-- incomplete elements. A complete element has defined tensors +-- for all components of its value tuple, and may be accessed using +-- BarrierTakeMany. An incomplete element has some undefined components +-- in its value tuple, and may be updated using BarrierInsertMany. +barrier :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) +barrier' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) + +-- | Closes the given barrier. +-- +-- This operation signals that no more new elements will be inserted in +-- the given barrier. Subsequent InsertMany that try to introduce a new +-- key will fail. Subsequent InsertMany operations that just add missing +-- components to already existing elements will continue to succeed. +-- Subsequent TakeMany operations will continue to succeed if sufficient +-- completed elements remain in the barrier. Subsequent TakeMany +-- operations that would block will fail immediately. +barrierClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) +barrierClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) + +-- | Computes the number of incomplete elements in the given barrier. +barrierIncompleteSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) +barrierIncompleteSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) + +-- | For each key, assigns the respective value to the specified component. +-- +-- If a key is not found in the barrier, this operation will create a new +-- incomplete element. 
If a key is found in the barrier, and the element +-- already has a value at component_index, this operation will fail with +-- INVALID_ARGUMENT, and leave the barrier in an undefined state. +barrierInsertMany :: (MonadBuild m', TensorType t) => Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode) +barrierInsertMany' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> Tensor v'3 t -> m' (ControlNode) + +-- | Computes the number of complete elements in the given barrier. +barrierReadySize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) +barrierReadySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) + +-- | Takes the given number of completed elements from a barrier. +-- +-- This operation concatenates completed-element component tensors along +-- the 0th dimension to make a single component tensor. +-- +-- Elements come out of the barrier when they are complete, and in the +-- order in which they were placed into the barrier. The indices output +-- provides information about the batch in which each element was +-- originally inserted into the barrier. +barrierTakeMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types)) +barrierTakeMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value ByteString, TensorList (Value) component_types)) +batchCholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +batchCholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t +batchCholeskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchCholeskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchIFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchIFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchIFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchIFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchIFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +batchIFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) + +-- | Multiplies slices of two tensors in batches. +-- +-- Multiplies all slices of Tensor x and y (each +-- slice can be viewed as an element of a batch), and arranges the +-- individual results in a single output tensor of the same batch size. +-- Each of the individual slices can optionally be adjointed (to adjoint +-- a matrix means to transpose and conjugate it) before multiplication by +-- setting the adj_x or adj_y flag to True, +-- which are by default False. 
+-- +-- The input tensors x and y are 3-D or higher with +-- shape `[..., r_x, c_x]` and `[..., r_y, c_y]`. +-- +-- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, +-- where: +-- +-- r_o = c_x if adj_x else r_x c_o = r_y if adj_y else c_y +-- +-- It is computed as: +-- +-- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) +batchMatMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchMatMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchMatrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t +batchMatrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t +batchMatrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +batchMatrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t +batchMatrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t +batchMatrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t +batchMatrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t +batchMatrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t +batchMatrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +batchMatrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t +batchMatrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchMatrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchMatrixSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchMatrixSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchMatrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t +batchMatrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t +batchMatrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +batchMatrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Batch normalization. +-- +-- This op is deprecated. Prefer `tf.nn.batch_normalization`. +batchNormWithGlobalNormalization :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t +batchNormWithGlobalNormalization' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor Build t + +-- | Gradients for batch normalization. +-- +-- This op is deprecated. See `tf.nn.batch_normalization`. 
+batchNormWithGlobalNormalizationGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) +batchNormWithGlobalNormalizationGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Bool -> Float -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) +batchSelfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +batchSelfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t +batchSelfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) +batchSelfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) +batchSvd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) +batchSvd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t) + +-- | BatchToSpace for 4-D tensors of type T. +-- +-- This is a legacy version of the more general BatchToSpaceND. +-- +-- Rearranges (permutes) data from batch into blocks of spatial data, +-- followed by cropping. This is the reverse transformation of +-- SpaceToBatch. More specifically, this op outputs a copy of the input +-- tensor where values from the batch dimension are moved in +-- spatial blocks to the height and width dimensions, +-- followed by cropping along the height and width +-- dimensions. +batchToSpace :: (TensorType t, OneOf '[Int32, Int64] tidx) => Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +batchToSpace' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | BatchToSpace for N-D tensors of type T. +-- +-- This operation reshapes the "batch" dimension 0 into `M + 1` +-- dimensions of shape `block_shape + [batch]`, interleaves these blocks +-- back into the grid defined by the spatial dimensions `[1, ..., M]`, to +-- obtain a result with the same rank as the input. The spatial +-- dimensions of this intermediate result are then optionally cropped +-- according to crops to produce the output. This is the reverse +-- of SpaceToBatch. See below for a precise description. +batchToSpaceND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t +batchToSpaceND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tcrops) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tcrops -> Tensor Build t + +-- | Compute the regularized incomplete beta integral \(I_x(a, b)\). +-- +-- The regularized incomplete beta integral is defined as: +-- +-- ``` I_x(a, b) = frac{B(x; a, b)}{B(a, b)} ``` where +-- +-- ``` B(x; a, b) = int_0^x t^{a-1} (1 - t)^{b-1} dt ``` +-- +-- is the incomplete beta function and \(B(a, b)\) is the *complete* beta +-- function. 
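+--
+-- An editorial sanity check: with a = b = 1 the underlying beta
+-- distribution is uniform, so I_x(1, 1) = x (assumes vector from
+-- tensorflow-ops' TensorFlow.Ops).
+--
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as CoreOps
+-- > import qualified TensorFlow.Ops as Ops
+-- >
+-- > check :: TF.Session (V.Vector Float)
+-- > check = TF.run $ CoreOps.betainc (Ops.vector [1, 1 :: Float])
+-- >                                  (Ops.vector [1, 1])
+-- >                                  (Ops.vector [0.25, 0.75])
+-- > -- [0.25,0.75]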
+betainc :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +betainc' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Adds bias to value. +-- +-- This is a special case of `tf.add` where bias is restricted +-- to be 1-D. Broadcasting is supported, so value may have any +-- number of dimensions. +biasAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +biasAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | The backward operation for BiasAdd on the "bias" tensor. +-- +-- It accumulates all the values from out_backprop into the feature +-- dimension. For NHWC data format, the feature dimension is the last. +-- For NCHW data format, the feature dimension is the third-to-last. +biasAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +biasAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Adds bias to value. +-- +-- This is a deprecated version of BiasAdd and will be soon removed. +-- +-- This is a special case of `tf.add` where bias is restricted +-- to be 1-D. Broadcasting is supported, so value may have any +-- number of dimensions. +biasAddV1 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +biasAddV1' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Bitcasts a tensor from one type to another without copying data. +-- +-- Given a tensor input, this operation returns a tensor that +-- has the same buffer data as input with datatype `type`. +-- +-- If the input datatype T is larger than the output datatype +-- `type` then the shape changes from [...] to [..., +-- sizeof(T)/sizeof(`type`)]. +-- +-- If T is smaller than `type`, the operator requires that the +-- rightmost dimension be equal to sizeof(`type`)/sizeof(T). The +-- shape then goes from [..., sizeof(`type`)/sizeof(T)] to +-- [...]. +-- +--
+-- *NOTE*: Bitcast is implemented as a low-level cast, so machines
+-- with different endian orderings will give different results.
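+--
+-- An editorial sketch: bitcasting 1.0 :: Float exposes its IEEE 754
+-- bit pattern, 0x3F800000 = 1065353216, as an Int32 (assumes vector
+-- from tensorflow-ops' TensorFlow.Ops; the output type is chosen by
+-- the type fetched).
+--
+-- > import Data.Int (Int32)
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as CoreOps
+-- > import qualified TensorFlow.Ops as Ops
+-- >
+-- > bits :: TF.Session (V.Vector Int32)
+-- > bits = TF.run (CoreOps.bitcast (Ops.vector [1.0 :: Float]))
+-- > -- [1065353216] (same-width bitcast; the endianness caveat above
+-- > -- matters when the element widths differ).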
+bitcast :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => Tensor v'1 t -> Tensor Build type' +bitcast' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => OpParams -> Tensor v'1 t -> Tensor Build type' + +-- | Return the shape of s0 op s1 with broadcast. +-- +-- Given s0 and s1, tensors that represent shapes, +-- compute r0, the broadcasted shape. s0, s1 +-- and r0 are all integer vectors. +broadcastArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +broadcastArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Return the reduction indices for computing gradients of s0 op s1 with +-- broadcast. +-- +-- This is typically used by gradient computations for a broadcasting +-- operation. +broadcastGradientArgs :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) +broadcastGradientArgs' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) + +-- | Performs beam search decoding on the logits given in input. +-- +-- A note about the attribute merge_repeated: For the beam search +-- decoder, this means that if consecutive entries in a beam are the +-- same, only the first of these is emitted. That is, when the top path +-- is "A B B B B", "A B" is returned if merge_repeated = True but "A B B +-- B B" is returned if merge_repeated = False. +cTCBeamSearchDecoder :: Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) +cTCBeamSearchDecoder' :: OpParams -> Int64 -> Int64 -> Tensor v'1 Float -> Tensor v'2 Int32 -> ([Tensor Build Int64], [Tensor Build Int64], [Tensor Build Int64], Tensor Build Float) + +-- | Performs greedy decoding on the logits given in inputs. +-- +-- A note about the attribute merge_repeated: if enabled, when +-- consecutive logits' maximum indices are the same, only the first of +-- these is emitted. Labeling the blank *, the sequence "A B B * B +-- B" becomes "A B" if merge_repeated = True and "A B B B B" if +-- merge_repeated = False. +-- +-- Regardless of the value of merge_repeated, if the maximum index of a +-- given time and batch corresponds to the blank, index `(num_classes - +-- 1)`, no new element is emitted. +cTCGreedyDecoder :: Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) +cTCGreedyDecoder' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> (Tensor Build Int64, Tensor Build Int64, Tensor Build Int64, Tensor Build Float) + +-- | Calculates the CTC Loss (log probability) for each batch entry. Also +-- calculates +-- +-- the gradient. This class performs the softmax operation for you, so +-- inputs should be e.g. linear projections of outputs by an LSTM. +cTCLoss :: Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float) +cTCLoss' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int64 -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> (Tensor Build Float, Tensor Build Float) + +-- | Cast x of type SrcT to y of DstT. 
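+--
+-- An editorial sketch of an element-wise cast from Int32 to Float;
+-- the destination type is fixed by the type expected at the fetch
+-- (assumes vector from tensorflow-ops' TensorFlow.Ops).
+--
+-- > import Data.Int (Int32)
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as CoreOps
+-- > import qualified TensorFlow.Ops as Ops
+-- >
+-- > floats :: TF.Session (V.Vector Float)
+-- > floats = TF.run (CoreOps.cast (Ops.vector [1, 2, 3 :: Int32]))
+-- > -- [1.0,2.0,3.0]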
+cast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT +cast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT + +-- | Returns element-wise smallest integer in not less than x. +ceil :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +ceil' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Checks a tensor for NaN and Inf values. +-- +-- When run, reports an InvalidArgument error if tensor +-- has any values that are not a number (NaN) or infinity (Inf). +-- Otherwise, passes tensor as-is. +checkNumerics :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +checkNumerics' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the Cholesky decomposition of one or more square matrices. +-- +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices, with the same constraints as the +-- single matrix Cholesky decomposition above. The output is a tensor of +-- the same shape as the input containing the Cholesky decompositions for +-- all input submatrices `[..., :, :]`. +cholesky :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +cholesky' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the reverse mode backpropagated gradient of the Cholesky +-- algorithm. +-- +-- For an explanation see "Differentiation of the Cholesky algorithm" by +-- Iain Murray http://arxiv.org/abs/1602.07527. +choleskyGrad :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +choleskyGrad' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Converts two real numbers to a complex number. +-- +-- Given a tensor real representing the real part of a complex +-- number, and a tensor imag representing the imaginary part of a +-- complex number, this operation returns complex numbers elementwise of +-- the form \(a + bj\), where *a* represents the real part and *b* +-- represents the imag part. +-- +-- The input tensors real and imag must have the same +-- shape. +-- +-- For example: +-- +-- ``` # tensor real is [2.25, 3.25] # tensor imag is +-- [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + +-- 5.75j]] ``` +complex :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout +complex' :: (OneOf '[Double, Float] t, OneOf '[Complex Double, Complex Float] tout) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build tout + +-- | Computes the complex absolute value of a tensor. +-- +-- Given a tensor x of complex numbers, this operation returns a +-- tensor of type float or double that is the absolute +-- value of each element in x. All elements in x must +-- be complex numbers of the form \(a + bj\). The absolute value is +-- computed as \( sqrt{a^2 + b^2}\). +complexAbs :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout +complexAbs' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout + +-- | Computes the ids of the positions in sampled_candidates that match +-- true_labels. +-- +-- When doing log-odds NCE, the result of this op should be passed +-- through a SparseToDense op, then added to the logits of the sampled +-- candidates. 
This has the effect of removing the sampled +-- labels that match the true labels by making the classifier sure that +-- they are sampled labels. +computeAccidentalHits :: Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) +computeAccidentalHits' :: OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> (Tensor Build Int32, Tensor Build Int64, Tensor Build Float) + +-- | Concatenates tensors along one dimension. +concat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t +concat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t + +-- | Computes offsets of concat inputs within its output. +-- +-- For example: +-- +-- ```prettyprint # x is [2, 2, 7] # y is [2, 3, 7] # +-- z is [2, 5, 7] concat_offset(2, [x, y, z]) => [0, 0, 0], +-- [0, 2, 0], [0, 5, 0] ``` +concatOffset :: Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32] +concatOffset' :: OpParams -> Tensor v'1 Int32 -> [Tensor v'2 Int32] -> [Tensor Build Int32] + +-- | Concatenates tensors along one dimension. +concatV2 :: (TensorType t, OneOf '[Int32, Int64] tidx) => [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t +concatV2' :: (TensorType t, OneOf '[Int32, Int64] tidx) => OpParams -> [Tensor v'1 t] -> Tensor v'2 tidx -> Tensor Build t + +-- | A conditional accumulator for aggregating gradients. The accumulator +-- accepts +-- +-- gradients marked with local_step greater or equal to the most recent +-- global_step known to the accumulator. The average can be extracted +-- from the accumulator, provided sufficient gradients have been +-- accumulated. Extracting the average automatically resets the aggregate +-- to 0, and increments the global_step recorded by the accumulator. +conditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString) +conditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString) + +-- | Returns the complex conjugate of a complex number. +-- +-- Given a tensor input of complex numbers, this operation +-- returns a tensor of complex numbers that are the complex conjugate of +-- each element in input. The complex numbers in input +-- must be of the form \(a + bj\), where *a* is the real part and *b* is +-- the imaginary part. +-- +-- The complex conjugate returned by this operation is of the form \(a - +-- bj\). +-- +-- For example: +-- +-- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] +-- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] ``` +conj :: (OneOf '[Complex Double, Complex Float] t) => Tensor v'1 t -> Tensor Build t +conj' :: (OneOf '[Complex Double, Complex Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns a constant tensor. +const :: (TensorType dtype) => Tensor Build dtype +const' :: (TensorType dtype) => OpParams -> Tensor Build dtype + +-- | Does nothing. Serves as a control trigger for scheduling. +-- +-- Only useful as a placeholder for control edges. +controlTrigger :: (MonadBuild m') => m' (ControlNode) +controlTrigger' :: (MonadBuild m') => OpParams -> m' (ControlNode) + +-- | Computes a 2-D convolution given 4-D input and filter +-- tensors. +-- +-- Given an input tensor of shape `[batch, in_height, in_width, +-- in_channels]` and a filter / kernel tensor of shape `[filter_height, +-- filter_width, in_channels, out_channels]`, this op performs the +-- following: +-- +--
+-- 1. Flattens the filter to a 2-D matrix with shape `[filter_height *
+--    filter_width * in_channels, output_channels]`.
+--
+-- 2. Extracts image patches from the input tensor to form a *virtual*
+--    tensor of shape `[batch, out_height, out_width, filter_height *
+--    filter_width * in_channels]`.
+--
+-- 3. For each patch, right-multiplies the filter matrix and the image
+--    patch vector.
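+--
+-- As an editorial aside before the detailed formula below: the
+-- strides and padding attributes are set through the primed variant's
+-- OpParams argument. A sketch, assuming opAttr from TensorFlow.Core
+-- and the lens-family (.~) setter these bindings use.
+--
+-- > {-# LANGUAGE OverloadedStrings #-}
+-- > import Data.ByteString (ByteString)
+-- > import Data.Int (Int64)
+-- > import Lens.Family2 ((.~))
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as CoreOps
+-- >
+-- > -- SAME padding with unit strides over NHWC input.
+-- > conv :: TF.Tensor v Float -> TF.Tensor v Float -> TF.Tensor TF.Build Float
+-- > conv input filter' = CoreOps.conv2D'
+-- >     ( (TF.opAttr "strides" .~ ([1, 1, 1, 1] :: [Int64]))
+-- >     . (TF.opAttr "padding" .~ ("SAME" :: ByteString)) )
+-- >     input filter'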
+-- +-- In detail, with the default NHWC format, +-- +-- output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, +-- strides[2] * j + dj, q] * filter[di, dj, q, k] +-- +-- Must have `strides[0] = strides[3] = 1`. For the most common case of +-- the same horizontal and vertices strides, `strides = [1, stride, +-- stride, 1]`. +conv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +conv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the gradients of convolution with respect to the filter. +conv2DBackpropFilter :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t +conv2DBackpropFilter' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t + +-- | Computes the gradients of convolution with respect to the input. +conv2DBackpropInput :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +conv2DBackpropInput' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Computes a 3-D convolution given 5-D input and filter +-- tensors. +-- +-- In signal processing, cross-correlation is a measure of similarity of +-- two waveforms as a function of a time-lag applied to one of them. This +-- is also known as a sliding dot product or sliding inner-product. +-- +-- Our Conv3D implements a form of cross-correlation. +conv3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +conv3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the gradients of 3-D convolution with respect to the filter. +conv3DBackpropFilter :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +conv3DBackpropFilter' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Computes the gradients of 3-D convolution with respect to the filter. +conv3DBackpropFilterV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t +conv3DBackpropFilterV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t + +-- | Computes the gradients of 3-D convolution with respect to the input. +conv3DBackpropInput :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +conv3DBackpropInput' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Computes the gradients of 3-D convolution with respect to the input. 
+conv3DBackpropInputV2 :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +conv3DBackpropInputV2' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Copy Op. +-- +-- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on +-- the device on which the tensor is allocated. +-- +-- Unlike the CopyHost Op, this op does not have HostMemory constraint on +-- its input or output. +copy :: (TensorType t) => Tensor v'1 t -> Tensor Build t +copy' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Copy Host Op. +-- +-- Performs CPU-to-CPU deep-copying of tensor. +-- +-- Unlike the Copy Op, this op has HostMemory constraint on its input or +-- output. +copyHost :: (TensorType t) => Tensor v'1 t -> Tensor Build t +copyHost' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes cos of x element-wise. +cos :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +cos' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Increments ref until it reaches limit. +countUpTo :: (MonadBuild m', OneOf '[Int32, Int64] t) => Int64 -> Tensor Ref t -> m' (Tensor Value t) +countUpTo' :: (MonadBuild m', OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor Ref t -> m' (Tensor Value t) + +-- | Extracts crops from the input image tensor and bilinearly resizes them +-- (possibly +-- +-- with aspect ratio change) to a common output size specified by +-- crop_size. This is more general than the +-- crop_to_bounding_box op which extracts a fixed size slice +-- from the input image and does not allow resizing or aspect ratio +-- change. +-- +-- Returns a tensor with crops from the input image at +-- positions defined at the bounding box locations in boxes. The +-- cropped boxes are all resized (with bilinear interpolation) to a fixed +-- `size = [crop_height, crop_width]`. The result is a 4-D tensor +-- `[num_boxes, crop_height, crop_width, depth]`. +cropAndResize :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float +cropAndResize' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build Float + +-- | Computes the gradient of the crop_and_resize op wrt the input boxes +-- tensor. +cropAndResizeGradBoxes :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float +cropAndResizeGradBoxes' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor v'3 Float -> Tensor v'4 Int32 -> Tensor Build Float + +-- | Computes the gradient of the crop_and_resize op wrt the input image +-- tensor. 
+cropAndResizeGradImage :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t +cropAndResizeGradImage' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t + +-- | Compute the pairwise cross product. +-- +-- a and b must be the same shape; they can either be +-- simple 3-element vectors, or any shape where the innermost dimension +-- is 3. In the latter case, each pair of corresponding 3-element vectors +-- is cross-multiplied independently. +cross :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +cross' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Compute the cumulative product of the tensor x along +-- axis. +-- +-- By default, this op performs an inclusive cumprod, which means that +-- the first element of the input is identical to the first element of +-- the output: ```prettyprint tf.cumprod([a, b, c]) ==> [a, a * b, a * +-- b * c] ``` +-- +-- By setting the exclusive kwarg to True, an exclusive +-- cumprod is performed instead: ```prettyprint tf.cumprod([a, b, c], +-- exclusive=True) ==> [0, a, a * b] ``` +-- +-- By setting the reverse kwarg to True, the cumprod is +-- performed in the opposite direction: ```prettyprint tf.cumprod([a, b, +-- c], reverse=True) ==> [a * b * c, b * c, c] ``` This is more +-- efficient than using separate `tf.reverse` ops. +-- +-- The reverse and exclusive kwargs can also be combined: +-- ```prettyprint tf.cumprod([a, b, c], exclusive=True, reverse=True) +-- ==> [b * c, c, 0] ``` +cumprod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +cumprod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Compute the cumulative sum of the tensor x along +-- axis. +-- +-- By default, this op performs an inclusive cumsum, which means that the +-- first element of the input is identical to the first element of the +-- output: ```prettyprint tf.cumsum([a, b, c]) ==> [a, a + b, a + b + +-- c] ``` +-- +-- By setting the exclusive kwarg to True, an exclusive +-- cumsum is performed instead: ```prettyprint tf.cumsum([a, b, c], +-- exclusive=True) ==> [0, a, a + b] ``` +-- +-- By setting the reverse kwarg to True, the cumsum is +-- performed in the opposite direction: ```prettyprint tf.cumsum([a, b, +-- c], reverse=True) ==> [a + b + c, b + c, c] ``` This is more +-- efficient than using separate `tf.reverse` ops. +-- +-- The reverse and exclusive kwargs can also be combined: +-- ```prettyprint tf.cumsum([a, b, c], exclusive=True, reverse=True) +-- ==> [b + c, c, 0] ``` +cumsum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +cumsum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Debug Identity Op. 
+--
+-- Provides an identity mapping of the non-Ref type input tensor for
+-- debugging.
+debugIdentity :: (TensorType t) => Tensor v'1 t -> Tensor Build t
+debugIdentity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t
+
+-- | Debug NaN Value Counter Op
+--
+-- Counts number of NaNs in the input tensor, for debugging.
+debugNanCount :: (TensorType t) => Tensor v'1 t -> Tensor Build Int64
+debugNanCount' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int64
+
+-- | Debug Numeric Summary Op.
+--
+-- Provide a basic summary of numeric value types, range and
+-- distribution.
+debugNumericSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build Double
+debugNumericSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Double
+
+-- | Decode web-safe base64-encoded strings.
+--
+-- Input may or may not have padding at the end. See EncodeBase64 for
+-- padding. Web-safe means that input must use - and _ instead of + and
+-- /.
+decodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString
+decodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
+
+-- | Convert CSV records to tensors. Each column maps to one tensor.
+--
+-- RFC 4180 format is expected for the CSV records.
+-- (https://tools.ietf.org/html/rfc4180) Note that we allow leading
+-- and trailing spaces with int or float fields.
+decodeCSV :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE
+decodeCSV' :: (OneOfs '[ByteString, Int32, Int64, Float] oUT_TYPE) => OpParams -> Tensor v'1 ByteString -> TensorList (v'2) oUT_TYPE -> TensorList (Build) oUT_TYPE
+
+-- | Decode the first frame of a GIF-encoded image to a uint8 tensor.
+--
+-- GIFs with frame or transparency compression are not supported; convert
+-- an animated GIF from compressed to uncompressed by:
+--
+-- convert $src.gif -coalesce $dst.gif
+decodeGif :: Tensor v'1 ByteString -> Tensor Build Word8
+decodeGif' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8
+
+-- | Convert JSON-encoded Example records to binary protocol buffer
+-- strings.
+--
+-- This op translates a tensor containing Example records, encoded using
+-- the standard JSON mapping, into a tensor containing the same
+-- records encoded as binary protocol buffers. The resulting tensor can
+-- then be fed to any of the other Example-parsing ops.
+decodeJSONExample :: Tensor v'1 ByteString -> Tensor Build ByteString
+decodeJSONExample' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
+
+-- | Decode a JPEG-encoded image to a uint8 tensor.
+--
+-- The attr channels indicates the desired number of color
+-- channels for the decoded image.
+--
+-- Accepted values are:
+--
+--   • 0: Use the number of channels in the JPEG-encoded image.
+--   • 1: output a grayscale image.
+--   • 3: output an RGB image.
+-- +-- If needed, the JPEG-encoded image is transformed to match the +-- requested number of color channels. +-- +-- The attr ratio allows downscaling the image by an integer +-- factor during decoding. Allowed values are: 1, 2, 4, and 8. This is +-- much faster than downscaling the image later. +decodeJpeg :: Tensor v'1 ByteString -> Tensor Build Word8 +decodeJpeg' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Word8 + +-- | Decode a PNG-encoded image to a uint8 or uint16 tensor. +-- +-- The attr channels indicates the desired number of color +-- channels for the decoded image. +-- +-- Accepted values are: +-- +--
+--   • 0: Use the number of channels in the PNG-encoded image.
+--   • 1: output a grayscale image.
+--   • 3: output an RGB image.
+--   • 4: output an RGBA image.
+-- +-- If needed, the PNG-encoded image is transformed to match the requested +-- number of color channels. +decodePng :: (OneOf '[Word16, Word8] dtype) => Tensor v'1 ByteString -> Tensor Build dtype +decodePng' :: (OneOf '[Word16, Word8] dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype + +-- | Reinterpret the bytes of a string as a vector of numbers. +decodeRaw :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type +decodeRaw' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type + +-- | Delete the tensor specified by its handle in the session. +deleteSessionTensor :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode) +deleteSessionTensor' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode) + +-- | Applies set operation along last dimension of 2 Tensor inputs. +-- +-- See SetOperationOp::SetOperationFromContext for values of +-- set_operation. +-- +-- Output result is a SparseTensor represented by +-- result_indices, result_values, and +-- result_shape. For set1 and set2 ranked +-- n, this has rank n and the same 1st `n-1` dimensions +-- as set1 and set2. The nth dimension +-- contains the result of set_operation applied to the +-- corresponding `[0...n-1]` dimension of set. +denseToDenseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) +denseToDenseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) + +-- | Applies set operation along last dimension of Tensor and +-- SparseTensor. +-- +-- See SetOperationOp::SetOperationFromContext for values of +-- set_operation. +-- +-- Input set2 is a SparseTensor represented by +-- set2_indices, set2_values, and set2_shape. +-- For set2 ranked n, 1st `n-1` dimensions must be the +-- same as set1. Dimension n contains values in a set, +-- duplicates are allowed but ignored. +-- +-- If validate_indices is True, this op validates the +-- order and range of set2 indices. +-- +-- Output result is a SparseTensor represented by +-- result_indices, result_values, and +-- result_shape. For set1 and set2 ranked +-- n, this has rank n and the same 1st `n-1` dimensions +-- as set1 and set2. The nth dimension +-- contains the result of set_operation applied to the +-- corresponding `[0...n-1]` dimension of set. +denseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) +denseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) + +-- | DepthToSpace for tensors of type T. +-- +-- Rearranges data from depth into blocks of spatial data. This is the +-- reverse transformation of SpaceToDepth. More specifically, this op +-- outputs a copy of the input tensor where values from the +-- depth dimension are moved in spatial blocks to the +-- height and width dimensions. The attr +-- block_size indicates the input block size and how the data is +-- moved. +-- +--
+--   • Chunks of data of size `block_size * block_size` from depth are
+--     rearranged into non-overlapping blocks of size `block_size x
+--     block_size`
+--   • The width of the output tensor is `input_width * block_size`,
+--     whereas the height is `input_height * block_size`.
+--   • The depth of the input tensor must be divisible by `block_size *
+--     block_size`.
+--
+-- That is, assuming the input is in the shape: `[batch, height, width,
+-- depth]`, the shape of the output will be: `[batch, height*block_size,
+-- width*block_size, depth/(block_size*block_size)]`
+--
+-- This operation requires that the input tensor be of rank 4, and that
+-- block_size be >=1 and that `block_size * block_size` be a
+-- divisor of the input depth.
+--
+-- This operation is useful for resizing the activations between
+-- convolutions (but keeping all data), e.g. instead of pooling. It is
+-- also useful for training purely convolutional models.
+--
+-- For example, given this input of shape `[1, 1, 1, 4]`, and a block
+-- size of 2:
+--
+-- ```prettyprint x = [[[[1, 2, 3, 4]]]]
+--
+-- ```
+--
+-- This operation will output a tensor of shape `[1, 2, 2, 1]`:
+--
+-- ```prettyprint [[[[1], [2]], [[3], [4]]]] ```
+--
+-- Here, the input has a batch of 1 and each batch element has shape `[1,
+-- 1, 4]`, the corresponding output will have 2x2 elements and will have
+-- a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output
+-- element shape is `[2, 2, 1]`.
+--
+-- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`,
+-- e.g.
+--
+-- ```prettyprint x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ```
+--
+-- This operation, for block size of 2, will return the following tensor
+-- of shape `[1, 2, 2, 3]`
+--
+-- ```prettyprint [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
+--
+-- ```
+--
+-- Similarly, for the following input of shape `[1 2 2 4]`, and a block
+-- size of 2:
+--
+-- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
+-- [13, 14, 15, 16]]]] ```
+--
+-- the operator will return the following tensor of shape `[1 4 4 1]`:
+--
+-- ```prettyprint x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [
+-- [9], [10], [13], [14]], [ [11], [12], [15], [16]]]
+--
+-- ```
+depthToSpace :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t
+depthToSpace' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t
+
+-- | Computes a 2-D depthwise convolution given 4-D input and
+-- filter tensors.
+--
+-- Given an input tensor of shape `[batch, in_height, in_width,
+-- in_channels]` and a filter / kernel tensor of shape `[filter_height,
+-- filter_width, in_channels, channel_multiplier]`, containing
+-- in_channels convolutional filters of depth 1,
+-- depthwise_conv2d applies a different filter to each input
+-- channel (expanding from 1 channel to channel_multiplier
+-- channels for each), then concatenates the results together. Thus, the
+-- output has `in_channels * channel_multiplier` channels.
+--
+-- for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b,
+-- i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] *
+-- i + di, strides[2] * j + dj, k] * filter[di, dj, k, q]
+--
+-- Must have `strides[0] = strides[3] = 1`. For the most common case of
+-- the same horizontal and vertical strides, `strides = [1, stride,
+-- stride, 1]`.
+depthwiseConv2dNative :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+depthwiseConv2dNative' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+
+-- | Computes the gradients of depthwise convolution with respect to the
+-- filter.
+depthwiseConv2dNativeBackpropFilter :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t +depthwiseConv2dNativeBackpropFilter' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t + +-- | Computes the gradients of depthwise convolution with respect to the +-- input. +depthwiseConv2dNativeBackpropInput :: (OneOf '[Double, Float] t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +depthwiseConv2dNativeBackpropInput' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Dequantize the input tensor into a float Tensor. +-- +--
+--   • min_range, max_range are scalar floats that specify the
+--     range for the input data. The mode attribute
+--     controls exactly which calculations are used to convert the float
+--     values to their quantized equivalents.
+--
+-- In MIN_COMBINED mode, each value of the tensor will undergo
+-- the following:
+--
+-- ``` if T == qint8, in[i] += (range(T) + 1)/ 2.0 out[i] = min_range +
+-- (in[i]* (max_range - min_range) / range(T)) ``` here `range(T) =
+-- numeric_limits<T>::max() - numeric_limits<T>::min()`
+--
+-- *MIN_COMBINED Mode Example*
+--
+-- If the input comes from a QuantizedRelu6, the output type is quint8
+-- (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The
+-- min_range and max_range values are therefore 0.0 and 6.0. Dequantize
+-- on quint8 will take each value, cast to float, and multiply by 6 /
+-- 255. Note that if the quantized type is qint8, the operation will
+-- additionally add 128 to each value prior to casting.
+--
+-- If the mode is MIN_FIRST, then this approach is used:
+--
+-- ``` number_of_steps = 1 << (# of bits in T) range_adjust =
+-- number_of_steps / (number_of_steps - 1) range = (range_max -
+-- range_min) * range_adjust range_scale = range / number_of_steps const
+-- double offset_input = static_cast<double>(input) -
+-- lowest_quantized; result = range_min + ((input -
+-- numeric_limits<T>::min()) * range_scale) ```
+dequantize :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
+dequantize' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float
+
+-- | Deserialize and concatenate SparseTensors from a serialized
+-- minibatch.
+--
+-- The input serialized_sparse must be a string matrix of shape
+-- `[N x 3]` where N is the minibatch size and the rows
+-- correspond to packed outputs of SerializeSparse. The ranks of
+-- the original SparseTensor objects must all match. When the
+-- final SparseTensor is created, it has rank one higher than
+-- the ranks of the incoming SparseTensor objects (they have
+-- been concatenated along a new row dimension).
+--
+-- The output SparseTensor object's shape values for all
+-- dimensions but the first are the max across the input
+-- SparseTensor objects' shape values for the corresponding
+-- dimensions. Its first shape value is N, the minibatch size.
+--
+-- The input SparseTensor objects' indices are assumed ordered
+-- in standard lexicographic order. If this is not the case, after this
+-- step run SparseReorder to restore index ordering.
+--
+-- For example, if the serialized input is a `[2 x 3]` matrix
+-- representing two original SparseTensor objects:
+--
+-- index = [ 0] [10] [20] values = [1, 2, 3] shape = [50]
+--
+-- and
+--
+-- index = [ 2] [10] values = [4, 5] shape = [30]
+--
+-- then the final deserialized SparseTensor will be:
+--
+-- index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5]
+-- shape = [2 50]
+deserializeManySparse :: (TensorType dtype) => Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)
+deserializeManySparse' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> (Tensor Build Int64, Tensor Build dtype, Tensor Build Int64)
+
+-- | Destroys the temporary variable and returns its final value.
+--
+-- Sets output to the value of the Tensor pointed to by ref,
+-- then destroys the temporary variable called var_name. All
+-- other uses of ref *must* have executed before this op. This
+-- is typically achieved by chaining the ref through each assign op, or
+-- by using control dependencies.
+--
+-- Outputs the final value of the tensor pointed to by ref.
+destroyTemporaryVariable :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Value t)
+destroyTemporaryVariable' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Value t)
+
+-- | Returns a diagonal tensor with given diagonal values.
+-- +-- Given a diagonal, this operation returns a tensor with the +-- diagonal and everything else padded with zeros. The diagonal +-- is computed as follows: +-- +-- Assume diagonal has dimensions [D1,..., Dk], then the output +-- is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: +-- +-- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 +-- everywhere else. +-- +-- For example: +-- +-- ```prettyprint # diagonal is [1, 2, 3, 4] tf.diag(diagonal) +-- ==> [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] ``` +diag :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t +diag' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns the diagonal part of the tensor. +-- +-- This operation returns a tensor with the diagonal part of the +-- input. The diagonal part is computed as follows: +-- +-- Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then +-- the output is a tensor of rank k with dimensions `[D1,..., +-- Dk]` where: +-- +-- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. +-- +-- For example: +-- +-- ```prettyprint # input is [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, +-- 3, 0] [0, 0, 0, 4]] +-- +-- tf.diag_part(input) ==> [1, 2, 3, 4] ``` +diagPart :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor Build t +diagPart' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes Psi, the derivative of Lgamma (the log of the absolute value +-- of +-- +-- `Gamma(x)`), element-wise. +digamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +digamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the grayscale dilation of 4-D input and 3-D +-- filter tensors. +-- +-- The input tensor has shape `[batch, in_height, in_width, +-- depth]` and the filter tensor has shape `[filter_height, +-- filter_width, depth]`, i.e., each input channel is processed +-- independently of the others with its own structuring function. The +-- output tensor has shape `[batch, out_height, out_width, +-- depth]`. The spatial dimensions of the output tensor depend on the +-- padding algorithm. We currently only support the default +-- NHWC data_format. +-- +-- In detail, the grayscale morphological 2-D dilation is the max-sum +-- correlation (for consistency with conv2d, we use unmirrored +-- filters): +-- +-- output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * +-- dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] +-- +-- Max-pooling is a special case when the filter has size equal to the +-- pooling kernel size and contains all zeros. +-- +-- Note on duality: The dilation of input by the filter +-- is equal to the negation of the erosion of `-input` by the reflected +-- filter. +dilation2D :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +dilation2D' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the gradient of morphological 2-D dilation with respect to +-- the filter. 
+dilation2DBackpropFilter :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +dilation2DBackpropFilter' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Computes the gradient of morphological 2-D dilation with respect to +-- the input. +dilation2DBackpropInput :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +dilation2DBackpropInput' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Returns x / y element-wise. +-- +--
+-- *NOTE*: Div supports broadcasting. More about broadcasting
+-- here
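+--
+-- A minimal usage sketch (not part of the generated docs; it assumes
+-- the vector and scalar helpers from TensorFlow.Ops, with this module
+-- imported qualified as TFC):
+--
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as TFC
+-- > import qualified TensorFlow.Ops as TF (vector, scalar)
+-- >
+-- > -- The scalar divisor broadcasts over the vector: yields [5.0, 4.5].
+-- > halves :: IO (V.Vector Float)
+-- > halves = TF.runSession $
+-- >     TF.run (TFC.div (TF.vector [10, 9 :: Float]) (TF.scalar 2))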
+div :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+div' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+
+-- | Draw bounding boxes on a batch of images.
+--
+-- Outputs a copy of images but draws on top of the pixels zero
+-- or more bounding boxes specified by the locations in boxes.
+-- The coordinates of each bounding box in boxes are encoded
+-- as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are
+-- floats in `[0.0, 1.0]` relative to the width and height of the
+-- underlying image.
+--
+-- For example, if an image is 100 x 200 pixels and the bounding box is
+-- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of
+-- the bounding box will be `(10, 40)` to `(50, 180)`.
+--
+-- Parts of the bounding box may fall outside the image.
+drawBoundingBoxes :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t
+drawBoundingBoxes' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor Build t
+
+-- | Partitions `data` into num_partitions tensors using indices
+-- from partitions.
+--
+-- For each index tuple js of size `partitions.ndim`, the slice
+-- `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices
+-- with `partitions[js] = i` are placed in `outputs[i]` in lexicographic
+-- order of js, and the first dimension of `outputs[i]` is the
+-- number of entries in partitions equal to i. In
+-- detail,
+--
+-- ```python outputs[i].shape = [sum(partitions == i)] +
+-- data.shape[partitions.ndim:]
+--
+-- outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) ```
+--
+-- `data.shape` must start with `partitions.shape`.
+--
+-- For example:
+--
+-- ```python # Scalar partitions. partitions = 1 num_partitions = 2 data
+-- = [10, 20] outputs[0] = [] # Empty with shape [0, 2] outputs[1] =
+-- [[10, 20]]
+--
+-- # Vector partitions. partitions = [0, 0, 1, 1, 0] num_partitions = 2
+-- data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] =
+-- [30, 40] ```
+--
+-- (figure: DynamicPartition.png)
+dynamicPartition :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t]
+dynamicPartition' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 Int32 -> [Tensor Build t]
+
+-- | Interleave the values from the `data` tensors into a single tensor.
+--
+-- Builds a merged tensor such that
+--
+-- ```python merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+-- ```
+--
+-- For example, if each `indices[m]` is scalar or vector, we have
+--
+-- ```python # Scalar indices: merged[indices[m], ...] = data[m][...]
+--
+-- # Vector indices: merged[indices[m][i], ...] = data[m][i, ...] ```
+--
+-- Each `data[i].shape` must start with the corresponding
+-- `indices[i].shape`, and the rest of `data[i].shape` must be constant
+-- w.r.t. i. That is, we must have `data[i].shape =
+-- indices[i].shape + constant`. In terms of this constant, the
+-- output shape is
+--
+-- merged.shape = [max(indices)] + constant
+--
+-- Values are merged in order, so if an index appears in both
+-- `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice
+-- `data[n][j]` will appear in the merged result.
+--
+-- For example:
+--
+-- ```python indices[0] = 6 indices[1] = [4, 1] indices[2] = [[5, 2], [0,
+-- 3]] data[0] = [61, 62] data[1] = [[41, 42], [11, 12]] data[2] = [[[51,
+-- 52], [21, 22]], [[1, 2], [31, 32]]] merged = [[1, 2], [11, 12], [21,
+-- 22], [31, 32], [41, 42], [51, 52], [61, 62]] ```
+--
+-- (figure: DynamicStitch.png)
+dynamicStitch :: (TensorType t) => [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t
+dynamicStitch' :: (TensorType t) => OpParams -> [Tensor v'1 Int32] -> [Tensor v'2 t] -> Tensor Build t
+
+-- | Computes the (possibly normalized) Levenshtein Edit Distance.
+--
+-- The inputs are variable-length sequences provided by SparseTensors
+-- (hypothesis_indices, hypothesis_values, hypothesis_shape) and
+-- (truth_indices, truth_values, truth_shape).
+--
+-- The inputs are:
+editDistance :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float
+editDistance' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor Build Float
+
+-- | Computes exponential linear: `exp(features) - 1` if < 0,
+-- features otherwise.
+--
+-- See Fast and Accurate Deep Network Learning by Exponential Linear
+-- Units (ELUs)
+elu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t
+elu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t
+
+-- | Computes gradients for the exponential linear (Elu) operation.
+eluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+eluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+
+-- | Encode strings into web-safe base64 format.
+--
+-- Refer to the following article for more information on base64 format:
+-- https://en.wikipedia.org/wiki/Base64. Base64 strings may have padding
+-- with '=' at the end so that the encoded value has a length that is a
+-- multiple of 4. See the Padding section of the link above.
+--
+-- Web-safe means that the encoder uses - and _ instead of + and /.
+encodeBase64 :: Tensor v'1 ByteString -> Tensor Build ByteString
+encodeBase64' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString
+
+-- | JPEG-encode an image.
+--
+-- image is a 3-D uint8 Tensor of shape `[height, width,
+-- channels]`.
+--
+-- The attr format can be used to override the color format of
+-- the encoded output. Values can be:
+--
+--   • `''`: Use a default format based on the number of channels in the
+--     image.
+--   • grayscale: Output a grayscale JPEG image. The
+--     channels dimension of image must be 1.
+--   • rgb: Output an RGB JPEG image. The channels
+--     dimension of image must be 3.
+-- +-- If format is not specified or is the empty string, a default +-- format is picked in function of the number of channels in +-- image: +-- +--
+--   • 1: Output a grayscale image.
+--   • 3: Output an RGB image.
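+--
+-- A minimal usage sketch (not part of the generated docs; it assumes
+-- the constant helper from TensorFlow.Ops, this module imported
+-- qualified as TFC, and that the scalar JPEG string can be fetched via
+-- unScalar):
+--
+-- > import qualified Data.ByteString as BS
+-- > import Data.Word (Word8)
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as TFC
+-- > import qualified TensorFlow.Ops as TF (constant)
+-- >
+-- > -- Encodes a 2x2 mid-grey RGB image and fetches the JPEG bytes.
+-- > jpegBytes :: IO BS.ByteString
+-- > jpegBytes = TF.unScalar <$> TF.runSession
+-- >     (TF.run (TFC.encodeJpeg
+-- >         (TF.constant (TF.Shape [2, 2, 3]) (replicate 12 (128 :: Word8)))))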
+encodeJpeg :: Tensor v'1 Word8 -> Tensor Build ByteString +encodeJpeg' :: OpParams -> Tensor v'1 Word8 -> Tensor Build ByteString + +-- | PNG-encode an image. +-- +-- image is a 3-D uint8 or uint16 Tensor of shape `[height, +-- width, channels]` where channels is: +-- +--
+--   • 1: for grayscale.
+--   • 2: for grayscale + alpha.
+--   • 3: for RGB.
+--   • 4: for RGBA.
+-- +-- The ZLIB compression level, compression, can be -1 for the +-- PNG-encoder default or a value from 0 to 9. 9 is the highest +-- compression level, generating the smallest output, but is slower. +encodePng :: (OneOf '[Word16, Word8] t) => Tensor v'1 t -> Tensor Build ByteString +encodePng' :: (OneOf '[Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString + +-- | Creates or finds a child frame, and makes `data` available to the +-- child frame. +-- +-- This op is used together with Exit to create loops in the +-- graph. The unique frame_name is used by the Executor +-- to identify frames. If is_constant is true, output +-- is a constant in the child frame; otherwise it may be changed in the +-- child frame. At most parallel_iterations iterations are run +-- in parallel in the child frame. +enter :: (TensorType t) => Tensor v'1 t -> Tensor Build t +enter' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns the truth value of (x == y) element-wise. +-- +--
+-- *NOTE*: Equal supports broadcasting. More about
+-- broadcasting here
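+--
+-- A minimal usage sketch (not part of the generated docs; it assumes
+-- the vector helper from TensorFlow.Ops, this module imported qualified
+-- as TFC, and that Bool vectors are fetchable):
+--
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as TFC
+-- > import qualified TensorFlow.Ops as TF (vector)
+-- >
+-- > -- Element-wise comparison: yields [True, False, True].
+-- > matches :: IO (V.Vector Bool)
+-- > matches = TF.runSession $
+-- >     TF.run (TFC.equal (TF.vector [1, 2, 3 :: Float])
+-- >                       (TF.vector [1, 5, 3]))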
+equal :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +equal' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool + +-- | Computes the Gauss error function of x element-wise. +erf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +erf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the complementary error function of x element-wise. +erfc :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +erfc' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Exits the current frame to its parent frame. +-- +-- Exit makes its input `data` available to the parent frame. +exit :: (TensorType t) => Tensor v'1 t -> Tensor Build t +exit' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes exponential of x element-wise. \(y = e^x\). +exp :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +exp' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Inserts a dimension of 1 into a tensor's shape. +-- +-- Given a tensor input, this operation inserts a dimension of 1 +-- at the dimension index dim of input's shape. The +-- dimension index dim starts at zero; if you specify a negative +-- number for dim it is counted backward from the end. +-- +-- This operation is useful if you want to add a batch dimension to a +-- single element. For example, if you have a single image of shape +-- `[height, width, channels]`, you can make it a batch of 1 image with +-- `expand_dims(image, 0)`, which will make the shape `[1, height, width, +-- channels]`. +-- +-- Other examples: +-- +-- ```prettyprint # t is a tensor of shape [2] +-- shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> +-- [2, 1] shape(expand_dims(t, -1)) ==> [2, 1] +-- +-- # t2 is a tensor of shape [2, 3, 5] shape(expand_dims(t2, 0)) +-- ==> [1, 2, 3, 5] shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] +-- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] ``` +-- +-- This operation requires that: +-- +-- `-1-input.dims() <= dim <= input.dims()` +-- +-- This operation is related to `squeeze()`, which removes dimensions of +-- size 1. +expandDims :: (TensorType t, OneOf '[Int32, Int64] tdim) => Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t +expandDims' :: (TensorType t, OneOf '[Int32, Int64] tdim) => OpParams -> Tensor v'1 t -> Tensor v'2 tdim -> Tensor Build t + +-- | Computes exponential of x - 1 element-wise. +-- +-- I.e., \(y = (exp x) - 1\). +expm1 :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +expm1' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Extracts a glimpse from the input tensor. +-- +-- Returns a set of windows called glimpses extracted at location +-- offsets from the input tensor. If the windows only partially +-- overlaps the inputs, the non overlapping areas will be filled with +-- random noise. +-- +-- The result is a 4-D tensor of shape `[batch_size, glimpse_height, +-- glimpse_width, channels]`. 
The channels and batch dimensions are the
+-- same as those of the input tensor. The height and width of the output
+-- windows are specified in the size parameter.
+--
+-- The arguments normalized and centered control how
+-- the windows are built:
+--
+--   • If the coordinates are normalized but not centered, 0.0 and 1.0
+--     correspond to the minimum and maximum of each height and width
+--     dimension.
+--   • If the coordinates are both normalized and centered, they range
+--     from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the
+--     upper left corner, the lower right corner is located at (1.0, 1.0)
+--     and the center is at (0, 0).
+--   • If the coordinates are not normalized they are interpreted as
+--     numbers of pixels.
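+--
+-- A minimal usage sketch (not part of the generated docs; it assumes
+-- the constant and vector helpers from TensorFlow.Ops and this module
+-- imported qualified as TFC; the shapes follow the description above):
+--
+-- > import Data.Int (Int32)
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as TFC
+-- > import qualified TensorFlow.Ops as TF (constant, vector)
+-- >
+-- > -- A 3x3 glimpse from a single 5x5x1 image; with the default
+-- > -- centered/normalized attrs, offsets (0, 0) is the image center.
+-- > glimpse :: IO (V.Vector Float)
+-- > glimpse = TF.runSession $ do
+-- >     let image   = TF.constant (TF.Shape [1, 5, 5, 1]) [1 .. 25 :: Float]
+-- >         size    = TF.vector [3, 3 :: Int32]
+-- >         offsets = TF.constant (TF.Shape [1, 2]) [0, 0 :: Float]
+-- >     TF.run (TFC.extractGlimpse image size offsets)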
+extractGlimpse :: Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float +extractGlimpse' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build Float + +-- | Extract patches from images and put them in the +-- "depth" output dimension. +extractImagePatches :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +extractImagePatches' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Compute the 1-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- dimension of input. +fFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +fFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) + +-- | Compute the 2-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- 2 dimensions of input. +fFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +fFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) + +-- | Compute the 3-dimensional discrete Fourier Transform over the +-- inner-most 3 +-- +-- dimensions of input. +fFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +fFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) + +-- | A queue that produces elements in first-in first-out order. +fIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) +fIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) + +-- | A queue that produces elements in first-in first-out order. +fIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) +fIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) + +-- | Output a fact about factorials. +fact :: Tensor Build ByteString +fact' :: OpParams -> Tensor Build ByteString + +-- | Fake-quantize the inputs tensor, type float to +-- outputs tensor of same type. +-- +-- Attributes [min; max] define the clamping range for the +-- inputs data. Op divides this range into 255 steps (total of +-- 256 values), then replaces each inputs value with the closest +-- of the quantized step values. +-- +-- Quantization is called fake since the output is still in floating +-- point. +fakeQuantWithMinMaxArgs :: Tensor v'1 Float -> Tensor Build Float +fakeQuantWithMinMaxArgs' :: OpParams -> Tensor v'1 Float -> Tensor Build Float + +-- | Compute gradients for a FakeQuantWithMinMaxArgs operation. +fakeQuantWithMinMaxArgsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float +fakeQuantWithMinMaxArgsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor Build Float + +-- | Fake-quantize the inputs tensor of type float and shape `[b, +-- h, w, d]` via +-- +-- global float scalars min and max to outputs +-- tensor of same shape as inputs. +-- +--
+--   • [min; max] is the clamping range for the inputs
+--     data. Op divides this range into 255 steps (total of 256 values),
+--     then replaces each inputs value with the closest of the
+--     quantized step values.
+-- +-- This operation has a gradient and thus allows for training min +-- and max values. +fakeQuantWithMinMaxVars :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float +fakeQuantWithMinMaxVars' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float + +-- | Compute gradients for a FakeQuantWithMinMaxVars operation. +fakeQuantWithMinMaxVarsGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) +fakeQuantWithMinMaxVarsGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) + +-- | Fake-quantize the inputs tensor of type float and one of the +-- shapes: `[d]`, +-- +-- `[b, d]` `[b, h, w, d]` via per-channel floats min and +-- max of shape `[d]` to outputs tensor of same shape as +-- inputs. +-- +--
+--   • [min; max] is the clamping range for the inputs data
+--     in the corresponding depth channel. Op divides this range into 255
+--     steps (total of 256 values), then replaces each inputs value
+--     with the closest of the quantized step values.
+-- +-- This operation has a gradient and thus allows for training min +-- and max values. +fakeQuantWithMinMaxVarsPerChannel :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float +fakeQuantWithMinMaxVarsPerChannel' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor Build Float + +-- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. +fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) +fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) + +-- | Deprecated. Do not use. +fakeQueue :: (MonadBuild m') => ResourceHandle -> m' (Tensor Ref ByteString) +fakeQueue' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Ref ByteString) + +-- | Creates a tensor filled with a scalar value. +-- +-- This operation creates a tensor of shape dims and fills it +-- with value. +-- +-- For example: +-- +-- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) +-- ==> [[9, 9, 9] [9, 9, 9]] ``` +fill :: (TensorType t) => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t +fill' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t + +-- | A Reader that outputs fixed-length records from a file. +fixedLengthRecordReader :: (MonadBuild m') => Int64 -> m' (Tensor Ref ByteString) +fixedLengthRecordReader' :: (MonadBuild m') => OpParams -> Int64 -> m' (Tensor Ref ByteString) + +-- | A Reader that outputs fixed-length records from a file. +fixedLengthRecordReaderV2 :: (MonadBuild m') => Int64 -> m' (ResourceHandle) +fixedLengthRecordReaderV2' :: (MonadBuild m') => OpParams -> Int64 -> m' (ResourceHandle) + +-- | Generates labels for candidate sampling with a learned unigram +-- distribution. +-- +-- A unigram sampler could use a fixed unigram distribution read from a +-- file or passed in as an in-memory array instead of building up the +-- distribution from data on the fly. There is also an option to skew the +-- distribution by applying a distortion power to the weights. +-- +-- The vocabulary file should be in CSV-like format, with the last field +-- being the weight associated with the word. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +fixedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) +fixedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) + +-- | Returns element-wise largest integer not greater than x. +floor :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +floor' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns x // y element-wise. +-- +--
+-- *NOTE*: FloorDiv supports broadcasting. More about
+-- broadcasting here
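+--
+-- A minimal usage sketch (not part of the generated docs; it assumes
+-- the vector helper from TensorFlow.Ops and this module imported
+-- qualified as TFC):
+--
+-- > import Data.Int (Int32)
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as TFC
+-- > import qualified TensorFlow.Ops as TF (vector)
+-- >
+-- > -- Flooring division: yields [2, -3], where truncation would give [2, -2].
+-- > quotients :: IO (V.Vector Int32)
+-- > quotients = TF.runSession $
+-- >     TF.run (TFC.floorDiv (TF.vector [7, -7 :: Int32]) (TF.vector [3, 3]))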
+floorDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +floorDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Returns element-wise remainder of division. When `x < 0` xor `y +-- < 0` is +-- +-- true, this follows Python semantics in that the result here is +-- consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) +-- = x`. +-- +--
+-- *NOTE*: FloorMod supports broadcasting. More about
+-- broadcasting here
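+--
+-- A minimal usage sketch (not part of the generated docs; it assumes
+-- the vector helper from TensorFlow.Ops and this module imported
+-- qualified as TFC), illustrating the flooring (Python-style) remainder
+-- described above:
+--
+-- > import Data.Int (Int32)
+-- > import qualified Data.Vector as V
+-- > import qualified TensorFlow.Core as TF
+-- > import qualified TensorFlow.GenOps.Core as TFC
+-- > import qualified TensorFlow.Ops as TF (vector)
+-- >
+-- > -- floor(-7 / 3) * 3 + r = -7 gives r = 2, so this yields [1, 2].
+-- > remainders :: IO (V.Vector Int32)
+-- > remainders = TF.runSession $
+-- >     TF.run (TFC.floorMod (TF.vector [7, -7 :: Int32]) (TF.vector [3, 3]))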
+floorMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +floorMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Performs fractional average pooling on the input. +-- +-- Fractional average pooling is similar to Fractional max pooling in the +-- pooling region generation step. The only difference is that after +-- pooling regions are generated, a mean operation is performed instead +-- of a max operation in each pooling region. +fractionalAvgPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) +fractionalAvgPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) -- | Computes gradient of the FractionalAvgPool function. -- @@ -59,10 +1907,8 @@ quantizedBiasAdd :: (TensorType t1, OneOf '[Int16, Int32, Word16, Word8] t1, Ten -- element of out_backprop to those indices that form the same pooling -- cell. Therefore, we just need to know the shape of original input -- tensor, instead of the whole tensor. -fractionalAvgPoolGrad :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor Value t - --- | Computes gradient of the FractionalMaxPool function. -fractionalMaxPoolGrad :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 Int64 -> Tensor v5 Int64 -> Tensor Value t +fractionalAvgPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t +fractionalAvgPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor Build t -- | Performs fractional max pooling on the input. -- @@ -103,25 +1949,355 @@ fractionalMaxPoolGrad :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) -- -- For more details on fractional max pooling, see this paper: -- Benjamin Graham, Fractional Max-Pooling -fractionalMaxPool :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value Int64, Tensor Value Int64) +fractionalMaxPool :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) +fractionalMaxPool' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int64, Tensor Build Int64) --- | Finds values and indices of the k largest elements for the --- last dimension. +-- | Computes gradient of the FractionalMaxPool function. +fractionalMaxPoolGrad :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t +fractionalMaxPoolGrad' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 Int64 -> Tensor v'5 Int64 -> Tensor Build t + +-- | Batch normalization. -- --- If the input is a vector (rank-1), finds the k largest --- entries in the vector and outputs their values and indices as vectors. --- Thus `values[j]` is the j-th largest entry in input, --- and its index is `indices[j]`. +-- Note that the size of 4D Tensors are defined by either NHWC or +-- NCHW. The size of 1D Tensors matches the dimension C of the 4D +-- Tensors. 
+fusedBatchNorm :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) +fusedBatchNorm' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) + +-- | Gradient for batch normalization. -- --- For matrices (resp. higher rank input), computes the top k --- entries in each row (resp. vector along the last dimension). Thus, +-- Note that the size of 4D Tensors are defined by either NHWC or +-- NCHW. The size of 1D Tensors matches the dimension C of the 4D +-- Tensors. +fusedBatchNormGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) +fusedBatchNormGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) + +-- | Performs a padding as a preprocess during a convolution. -- --- values.shape = indices.shape = input.shape[:-1] + [k] +-- Similar to FusedResizeAndPadConv2d, this op allows for an optimized +-- implementation where the spatial padding transformation stage is fused +-- with the im2col lookup, but in this case without the bilinear +-- filtering required for resizing. Fusing the padding prevents the need +-- to write out the intermediate results as whole tensors, reducing +-- memory pressure, and we can get some latency gains by merging the +-- transformation calculations. The data_format attribute for Conv2D +-- isn't supported by this op, and NHWC order is used instead. +-- Internally this op uses a single per-graph scratch buffer, which means +-- that it will block if multiple versions are being run in parallel. +-- This is because this operator is primarily an optimization to minimize +-- memory usage. +fusedPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t +fusedPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor Build t + +-- | Performs a resize and padding as a preprocess during a convolution. -- --- If two elements are equal, the lower-index element appears first. +-- It's often possible to do spatial transformations more efficiently as +-- part of the packing stage of a convolution, so this op allows for an +-- optimized implementation where these stages are fused together. This +-- prevents the need to write out the intermediate results as whole +-- tensors, reducing memory pressure, and we can get some latency gains +-- by merging the transformation calculations. The data_format attribute +-- for Conv2D isn't supported by this op, and defaults to NHWC +-- order. Internally this op uses a single per-graph scratch buffer, +-- which means that it will block if multiple versions are being run in +-- parallel. 
This is because this operator is primarily an optimization
+-- to minimize memory usage.
+fusedResizeAndPadConv2D :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t
+fusedResizeAndPadConv2D' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor v'4 t -> Tensor Build t
+
+-- | Gather slices from params according to indices.
+--
+-- indices must be an integer tensor of any dimension (usually
+-- 0-D or 1-D). Produces an output tensor with shape `indices.shape +
+-- params.shape[1:]` where:
+--
+-- ```python # Scalar indices output[:, ..., :] = params[indices, :, ...
+-- :]
+--
+-- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :]
+--
+-- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i,
+-- ..., j], :, ..., :] ```
+--
+-- If indices is a permutation and `len(indices) ==
+-- params.shape[0]` then this operation will permute params
+-- accordingly.
+--
+-- (figure: Gather.png)
+gather :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
+gather' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams
+
+-- | Gather values or slices from params according to
+-- indices.
+--
+-- params is a Tensor of rank P and indices is
+-- a Tensor of rank Q.
+--
+-- indices must be an integer tensor, containing indices into
+-- params. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0
+-- < K <= P`.
+--
+-- The innermost dimension of indices (with length K)
+-- corresponds to indices into elements (if `K = P`) or slices (if `K
+-- < P`) along the Kth dimension of params.
+--
+-- Produces an output tensor with shape
+--
+-- ``` [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. ```
+--
+-- Some examples below.
+-- +-- Simple indexing into a matrix: +-- +-- ```python indices = [[0, 0], [1, 1]] params = [[a, +-- b], [c, d]] output = [a, +-- d] ``` +-- +-- Slice indexing into a matrix: +-- +-- ```python indices = [[1], [0]] params = [[a, b], +-- [c, d]] output = [[c, d], +-- [a, b]] ``` +-- +-- Indexing into a 3-tensor: +-- +-- ```python indices = [[1]] params = [[[a0, b0], +-- [c0, d0]], [[a1, b1], +-- [c1, d1]]] output = [[[a1, b1], +-- [c1, d1]]] +-- +-- indices = [[0, 1], [1, 0]] params = [[[a0, b0], +-- [c0, d0]], [[a1, b1], +-- [c1, d1]]] output = [[c0, d0], +-- [a1, b1]] +-- +-- indices = [[0, 0, 1], [1, 0, 1]] params = [[[a0, +-- b0], [c0, d0]], [[a1, +-- b1], [c1, d1]]] output = [b0, +-- b1] ``` +-- +-- Batched indexing into a matrix: +-- +-- ```python indices = [[[0, 0]], [[0, 1]]] params = [[a, +-- b], [c, d]] output = [[a], +-- [b]] ``` +-- +-- Batched slice indexing into a matrix: +-- +-- ```python indices = [[[1]], [[0]]] params = [[a, b], +-- [c, d]] output = [[[c, d]], +-- [[a, b]]] ``` +-- +-- Batched indexing into a 3-tensor: +-- +-- ```python indices = [[[1]], [[0]]] params = [[[a0, +-- b0], [c0, d0]], [[a1, +-- b1], [c1, d1]]] output = [[[[a1, +-- b1], [c1, d1]]], [[[a0, +-- b0], [c0, d0]]]] +-- +-- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = +-- [[[a0, b0], [c0, d0]], +-- [[a1, b1], [c1, d1]]] output = +-- [[[c0, d0], [a1, b1]], +-- [[a0, b0], [c1, d1]]] +-- +-- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = +-- [[[a0, b0], [c0, d0]], +-- [[a1, b1], [c1, d1]]] output = +-- [[b0, b1], [d0, c1]] ``` +gatherNd :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams +gatherNd' :: (TensorType tparams, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tparams -> Tensor v'2 tindices -> Tensor Build tparams + +-- | Store the input tensor in the state of the current session. +getSessionHandle :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString +getSessionHandle' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString + +-- | Get the value of the tensor specified by its handle. +getSessionTensor :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor Build dtype +getSessionTensor' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor Build dtype + +-- | Returns the truth value of (x > y) element-wise. +-- +--
+-- *NOTE*: Greater supports broadcasting. More about
+-- broadcasting here
+greater :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +greater' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool + +-- | Returns the truth value of (x >= y) element-wise. +-- +--
+-- *NOTE*: GreaterEqual supports broadcasting. More about
+-- broadcasting here
+greaterEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +greaterEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool + +-- | Convert one or more images from HSV to RGB. +-- +-- Outputs a tensor of the same shape as the images tensor, +-- containing the RGB value of the pixels. The output is only well +-- defined if the value in images are in `[0,1]`. +-- +-- See rgb_to_hsv for a description of the HSV encoding. +hSVToRGB :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +hSVToRGB' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Creates a non-initialized hash table. +-- +-- This op creates a hash table, specifying the type of its keys and +-- values. Before using the table you will have to initialize it. After +-- initialization the table will be immutable. +hashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) +hashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) + +-- | Outputs a Summary protocol buffer with a histogram. +-- +-- The generated `Summary` has one summary value containing a +-- histogram for values. +-- +-- This op reports an InvalidArgument error if any value is not +-- finite. +histogramSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString +histogramSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString + +-- | Compute the inverse 1-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- dimension of input. +iFFT :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +iFFT' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) + +-- | Compute the inverse 2-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- 2 dimensions of input. +iFFT2D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +iFFT2D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) + +-- | Compute the inverse 3-dimensional discrete Fourier Transform over the +-- inner-most +-- +-- 3 dimensions of input. +iFFT3D :: Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) +iFFT3D' :: OpParams -> Tensor v'1 (Complex Float) -> Tensor Build (Complex Float) + +-- | Return a tensor with the same shape and contents as the input tensor +-- or value. +identity :: (TensorType t) => Tensor v'1 t -> Tensor Build t +identity' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | A Reader that outputs the queued work as both the key and value. +-- +-- To use, enqueue strings in a Queue. ReaderRead will take the front +-- work string and output (work, work). +identityReader :: (MonadBuild m') => m' (Tensor Ref ByteString) +identityReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) + +-- | A Reader that outputs the queued work as both the key and value. +-- +-- To use, enqueue strings in a Queue. ReaderRead will take the front +-- work string and output (work, work). +identityReaderV2 :: (MonadBuild m') => m' (ResourceHandle) +identityReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) + +-- | Compute the lower regularized incomplete Gamma function `Q(a, x)`. 
+-- +-- The lower regularized incomplete Gamma function is defined as: +-- +-- ``` P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) ``` where ``` +-- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt ``` is the lower +-- incomplete Gamma function. +-- +-- Note, above `Q(a, x)` (Igammac) is the upper regularized +-- complete Gamma function. +igamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +igamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Compute the upper regularized incomplete Gamma function `Q(a, x)`. +-- +-- The upper regularized incomplete Gamma function is defined as: +-- +-- ``` Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) ``` where ``` +-- Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt ``` is the upper +-- incomplete Gama function. +-- +-- Note, above `P(a, x)` (Igamma) is the lower regularized +-- complete Gamma function. +igammac :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +igammac' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Returns the imaginary part of a complex number. +-- +-- Given a tensor input of complex numbers, this operation +-- returns a tensor of type float that is the imaginary part of +-- each element in input. All elements in input must be +-- complex numbers of the form \(a + bj\), where *a* is the real part and +-- *b* is the imaginary part returned by this operation. +-- +-- For example: +-- +-- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] +-- tf.imag(input) ==> [4.75, 5.75] ``` +imag :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout +imag' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout + +-- | Outputs a Summary protocol buffer with images. +-- +-- The summary has up to max_images summary values containing +-- images. The images are built from tensor which must be 4-D +-- with shape `[batch_size, height, width, channels]` and where +-- channels can be: +-- +--
+-- * 1: tensor is interpreted as Grayscale.
+-- * 3: tensor is interpreted as RGB.
+-- * 4: tensor is interpreted as RGBA.
+-- +-- The images have the same number of channels as the input tensor. For +-- float input, the values are normalized one image at a time to fit in +-- the range `[0, 255]`. uint8 values are unchanged. The op uses +-- two different normalization algorithms: +-- +--
+-- * If the input values are all positive, they are rescaled so the
+-- largest one is 255.
+-- * If any input value is negative, the values are shifted so input
+-- value 0.0 is at 127. They are then rescaled so that either the
+-- smallest value is 0, or the largest one is 255.
+-- +-- The tag argument is a scalar Tensor of type +-- string. It is used to build the tag of the summary +-- values: +-- +--
+-- * If max_images is 1, the summary value tag is '*tag*/image'.
+-- * If max_images is greater than 1, the summary value tags are
+-- generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+-- +-- The bad_color argument is the color to use in the generated +-- images for non-finite input values. It is a unit8 1-D tensor +-- of length channels. Each element must be in the range `[0, +-- 255]` (It represents the value of a pixel in the output image). +-- Non-finite values in the input tensor are replaced by this tensor in +-- the output image. The default value is the color red. +imageSummary :: (OneOf '[Word16, Word8, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString +imageSummary' :: (OneOf '[Word16, Word8, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString + +-- | Returns immutable tensor from memory region. +-- +-- The current implementation memmaps the tensor from a file. +immutableConst :: (TensorType dtype) => Shape -> Tensor Build dtype +immutableConst' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype -- | Says whether the targets are in the top K predictions. -- @@ -141,170 +2317,1123 @@ topK :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, -- -- $$out_i = predictions_{i, targets_i} in -- TopKIncludingTies(predictions_i)$$ -inTopK :: (TensorType t, OneOf '[Int32, Int64] t) => Int64 -> Tensor v1 Float -> Tensor v2 t -> Tensor Value Bool +inTopK :: (OneOf '[Int32, Int64] t) => Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool +inTopK' :: (OneOf '[Int32, Int64] t) => OpParams -> Int64 -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build Bool --- | Computes softmax cross entropy cost and gradients to backpropagate. --- --- Unlike SoftmaxCrossEntropyWithLogits, this operation does not --- accept a matrix of label probabilities, but rather a single label per --- row of features. This label is considered to have probability 1.0 for --- the given row. --- --- Inputs are the logits, not probabilities. -sparseSoftmaxCrossEntropyWithLogits :: (TensorType t, OneOf '[Word16, Double, Float] t, TensorType tlabels, OneOf '[Int32, Int64] tlabels) => Tensor v1 t -> Tensor v2 tlabels -> (Tensor Value t, Tensor Value t) +-- | Table initializer that takes two tensors for keys and values +-- respectively. +initializeTable :: (MonadBuild m', TensorType tkey, TensorType tval) => Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode) +initializeTable' :: (MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tkey -> Tensor v'3 tval -> m' (ControlNode) --- | Computes softmax cross entropy cost and gradients to backpropagate. +-- | Initializes a table from a text file. -- --- Inputs are the logits, not probabilities. -softmaxCrossEntropyWithLogits :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) +-- It inserts one key-value pair into the table for each line of the +-- file. The key and value is extracted from the whole line content, +-- elements from the split line based on delimiter or the line +-- number (starting from zero). Where to extract the key and value from a +-- line is specified by key_index and value_index. +-- +--
+-- * A value of -1 means use the line number (starting from zero),
+-- expects int64.
+-- * A value of -2 means use the whole line content, expects string.
+-- * A value >= 0 means use the index (starting at zero) of the split
+-- line based on delimiter.
+initializeTableFromTextFile :: (MonadBuild m') => Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) +initializeTableFromTextFile' :: (MonadBuild m') => OpParams -> Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) + +-- | Computes the reciprocal of x element-wise. +-- +-- I.e., \(y = 1 / x\). +inv :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +inv' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the gradient for the inverse of x wrt its input. +-- +-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is +-- the corresponding input gradient. +invGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +invGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the inverse permutation of a tensor. +-- +-- This operation computes the inverse of an index permutation. It takes +-- a 1-D integer tensor x, which represents the indices of a +-- zero-based array, and swaps each value with its index position. In +-- other words, for an output tensor y and an input tensor +-- x, this operation computes the following: +-- +-- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` +-- +-- The values must include 0. There can be no duplicate values or +-- negative values. +-- +-- For example: +-- +-- ```prettyprint # tensor x is [3, 4, 0, 2, 1] +-- invert_permutation(x) ==> [2, 4, 3, 0, 1] ``` +invertPermutation :: (OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor Build t +invertPermutation' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns which elements of x are finite. +-- +-- compatibility(numpy) Equivalent to np.isfinite +-- end_compatibility +isFinite :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool +isFinite' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool + +-- | Returns which elements of x are Inf. +-- +-- compatibility(numpy) Equivalent to np.isinf end_compatibility +isInf :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool +isInf' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool + +-- | Returns which elements of x are NaN. +-- +-- compatibility(numpy) Equivalent to np.isnan end_compatibility +isNan :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build Bool +isNan' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build Bool + +-- | Checks whether a tensor has been initialized. +-- +-- Outputs boolean scalar indicating whether the tensor has been +-- initialized. +isVariableInitialized :: (MonadBuild m', TensorType dtype) => Tensor Ref dtype -> m' (Tensor Value Bool) +isVariableInitialized' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref dtype -> m' (Tensor Value Bool) + +-- | L2 Loss. 
+-- +-- Computes half the L2 norm of a tensor without the sqrt: +-- +-- output = sum(t ** 2) / 2 +l2Loss :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +l2Loss' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Local Response Normalization. +-- +-- The 4-D input tensor is treated as a 3-D array of 1-D vectors +-- (along the last dimension), and each vector is normalized +-- independently. Within a given vector, each component is divided by the +-- weighted, squared sum of inputs within depth_radius. In +-- detail, +-- +-- sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + +-- depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** +-- beta +-- +-- For details, see Krizhevsky et al., ImageNet classification with +-- deep convolutional neural networks (NIPS 2012). +lRN :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t +lRN' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Gradients for Local Response Normalization. +lRNGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +lRNGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Generates labels for candidate sampling with a learned unigram +-- distribution. +-- +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +learnedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) +learnedUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) + +-- | Returns the truth value of (x < y) element-wise. +-- +--
+-- *NOTE*: Less supports broadcasting. More about
+-- broadcasting here
+less :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +less' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool + +-- | Returns the truth value of (x <= y) element-wise. +-- +--
+-- *NOTE*: LessEqual supports broadcasting. More about
+-- broadcasting here
+lessEqual :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +lessEqual' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool + +-- | Computes the log of the absolute value of `Gamma(x)` element-wise. +lgamma :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +lgamma' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Generates values in an interval. +-- +-- A sequence of num evenly-spaced values are generated +-- beginning at start. If `num > 1`, the values in the +-- sequence increase by `stop - start / num - 1`, so that the last one is +-- exactly stop. +-- +-- For example: +-- +-- ``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 +-- 12.0] ``` +linSpace :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t +linSpace' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 tidx -> Tensor Build t + +-- | Computes the difference between two lists of numbers or strings. +-- +-- Given a list x and a list y, this operation returns +-- a list out that represents all values that are in x +-- but not in y. The returned list out is sorted in the +-- same order that the numbers appear in x (duplicates are +-- preserved). This operation also returns a list idx that +-- represents the position of each out element in x. In +-- other words: +-- +-- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` +-- +-- For example, given this input: +-- +-- ```prettyprint x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ``` +-- +-- This operation would return: +-- +-- ```prettyprint out ==> [2, 4, 6] idx ==> [1, 3, 5] ``` +listDiff :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx) +listDiff' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build out_idx) + +-- | Computes natural logarithm of x element-wise. +-- +-- I.e., \(y = log_e x\). +log :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +log' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes natural logarithm of (1 + x) element-wise. +-- +-- I.e., \(y = log_e (1 + x)\). +log1p :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +log1p' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes log softmax activations. -- -- For each batch i and class j we have -- -- logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) -logSoftmax :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t +logSoftmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +logSoftmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Computes softsign gradients for a softsign operation. -softsignGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes softplus: `log(exp(features) + 1)`. 
-softplus :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes gradients for the exponential linear (Elu) operation. -eluGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes exponential linear: `exp(features) - 1` if < 0, --- features otherwise. +-- | Generates labels for candidate sampling with a log-uniform +-- distribution. -- --- See Fast and Accurate Deep Network Learning by Exponential Linear --- Units (ELUs) -elu :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes rectified linear 6: `min(max(features, 0), 6)`. -relu6 :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes rectified linear gradients for a Relu operation. -reluGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes the gradient of morphological 2-D dilation with respect to --- the input. -dilation2DBackpropInput :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Computes gradients of the maxpooling function. -maxPoolGrad :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Gradients for Local Response Normalization. -lRNGrad :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Computes gradients of max pooling function. -maxPool3DGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Float -> Tensor v2 Float -> Tensor v3 t -> Tensor Value t - --- | Computes the gradients of 3-D convolution with respect to the filter. -conv3DBackpropFilterV2 :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Value t - --- | Computes the gradients of 3-D convolution with respect to the filter. -conv3DBackpropFilter :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Computes a 3-D convolution given 5-D input and filter --- tensors. +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. -- --- In signal processing, cross-correlation is a measure of similarity of --- two waveforms as a function of a time-lag applied to one of them. This --- is also known as a sliding dot product or sliding inner-product. +-- For each batch, this op picks a single set of sampled candidate +-- labels. -- --- Our Conv3D implements a form of cross-correlation. -conv3D :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. 
+logUniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) +logUniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) --- | Computes the gradients of depthwise convolution with respect to the --- filter. -depthwiseConv2dNativeBackpropFilter :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Value t - --- | Computes the gradients of convolution with respect to the filter. -conv2DBackpropFilter :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Value t - --- | Computes the gradients of convolution with respect to the input. -conv2DBackpropInput :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Computes a 2-D convolution given 4-D input and filter --- tensors. --- --- Given an input tensor of shape `[batch, in_height, in_width, --- in_channels]` and a filter / kernel tensor of shape `[filter_height, --- filter_width, in_channels, out_channels]`, this op performs the --- following: --- ---
--- 1. Flattens the filter to a 2-D matrix with shape `[filter_height *
--- filter_width * in_channels, output_channels]`.
---
--- 2. Extracts image patches from the input tensor to form a *virtual*
--- tensor of shape `[batch, out_height, out_width, filter_height *
--- filter_width * in_channels]`.
---
--- 3. For each patch, right-multiplies the filter matrix and the image
--- patch vector.
--- --- In detail, with the default NHWC format, --- --- output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, --- strides[2] * j + dj, q] * filter[di, dj, q, k] --- --- Must have `strides[0] = strides[3] = 1`. For the most common case of --- the same horizontal and vertices strides, `strides = [1, stride, --- stride, 1]`. -conv2D :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Adds bias to value. --- --- This is a special case of `tf.add` where bias is restricted --- to be 1-D. Broadcasting is supported, so value may have any --- number of dimensions. -biasAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Batch normalization. --- --- Note that the size of 4D Tensors are defined by either NHWC or --- NCHW. The size of 1D Tensors matches the dimension C of the 4D --- Tensors. -fusedBatchNorm :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t) - --- | Gradients for batch normalization. --- --- This op is deprecated. See `tf.nn.batch_normalization`. -batchNormWithGlobalNormalizationGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t) -batchFFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) -batchIFFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) - --- | Performs average pooling on the input. --- --- Each entry in output is the mean of the corresponding size --- ksize window in value. -avgPool :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t -batchFFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) -batchFFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) - --- | Given a quantized tensor described by (input, input_min, input_max), --- outputs a --- --- range that covers the actual values present in that tensor. This op is --- typically used to produce the requested_output_min and --- requested_output_max for Requantize. -requantizationRange :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput) => Tensor v1 tinput -> Tensor v2 Float -> Tensor v3 Float -> (Tensor Value Float, Tensor Value Float) - --- | Convert the quantized input tensor into a lower-precision --- output, using the --- --- output range specified with requested_output_min and --- requested_output_max. +-- | Returns the truth value of x AND y element-wise. -- --
--- * input_min, input_max are scalar floats that specify the
--- range for the float interpretation of the input data. For
--- example, if input_min is -1.0f and input_max is 1.0f, and we are
--- dealing with quint16 quantized data, then a 0 value in the 16-bit data
--- should be interpreted as -1.0f, and a 65535 means 1.0f.
+-- *NOTE*: LogicalAnd supports broadcasting. More about
+-- broadcasting here
-requantize :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v1 tinput -> Tensor v2 Float -> Tensor v3 Float -> Tensor v4 Float -> Tensor v5 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float) +logicalAnd :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool +logicalAnd' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool + +-- | Returns the truth value of NOT x element-wise. +logicalNot :: Tensor v'1 Bool -> Tensor Build Bool +logicalNot' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool + +-- | Returns the truth value of x OR y element-wise. +-- +--
+-- *NOTE*: LogicalOr supports broadcasting. More about
+-- broadcasting here
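+--
+-- A minimal sketch, under the same qualified imports as the
+-- Greater example above (TF = TensorFlow.Core, TFC =
+-- TensorFlow.GenOps.Core, TFO = TensorFlow.Ops), again assuming
+-- Bool tensors are fetchable:
+--
+-- ```haskell
+-- r <- TF.runSession $
+--   TF.run (TFC.logicalOr (TFO.vector [True, False, False])
+--                         (TFO.vector [False, False, True]))
+-- print (r :: V.Vector Bool)  -- expected: [True,False,True]
+-- ```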
+logicalOr :: Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool +logicalOr' :: OpParams -> Tensor v'1 Bool -> Tensor v'2 Bool -> Tensor Build Bool + +-- | Outputs all keys and values in the table. +lookupTableExport :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues)) +lookupTableExport' :: (MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor Ref ByteString -> m' ((Tensor Value tkeys, Tensor Value tvalues)) + +-- | Looks up keys in a table, outputs the corresponding values. +-- +-- The tensor keys must of the same type as the keys of the +-- table. The output values is of the type of the table values. +-- +-- The scalar default_value is the value output for keys not +-- present in the table. It must also be of the same type as the table +-- values. +lookupTableFind :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout) +lookupTableFind' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (Tensor Value tout) + +-- | Replaces the contents of the table with the specified keys and values. +-- +-- The tensor keys must be of the same type as the keys of the +-- table. The tensor values must be of the type of the table +-- values. +lookupTableImport :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) +lookupTableImport' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) + +-- | Updates the table to associates keys with values. +-- +-- The tensor keys must be of the same type as the keys of the +-- table. The tensor values must be of the type of the table +-- values. +lookupTableInsert :: (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) +lookupTableInsert' :: (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref ByteString -> Tensor v'2 tin -> Tensor v'3 tout -> m' (ControlNode) + +-- | Computes the number of elements in the given table. +lookupTableSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) +lookupTableSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) + +-- | Forwards the input to the output. +-- +-- This operator represents the loop termination condition used by the +-- "pivot" switches of a loop. +loopCond :: Tensor v'1 Bool -> Tensor Build Bool +loopCond' :: OpParams -> Tensor v'1 Bool -> Tensor Build Bool + +-- | Multiply the matrix "a" by the matrix "b". +-- +-- The inputs must be two-dimensional matrices and the inner dimension of +-- "a" (after being transposed if transpose_a is true) must match the +-- outer dimension of "b" (after being transposed if transposed_b is +-- true). +-- +--
+-- *Note*: The default kernel implementation for MatMul on GPUs uses
+-- cublas.
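+--
+-- A minimal sketch, assuming the constant helper from
+-- TensorFlow.Ops and runSession/run from TensorFlow.Core:
+--
+-- ```haskell
+-- import qualified Data.Vector as V
+-- import qualified TensorFlow.Core as TF
+-- import qualified TensorFlow.GenOps.Core as TFC
+-- import qualified TensorFlow.Ops as TFO
+--
+-- main :: IO ()
+-- main = do
+--   -- [[1,2],[3,4]] x [[5,6],[7,8]], both row-major.
+--   let a = TFO.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
+--       b = TFO.constant (TF.Shape [2, 2]) [5, 6, 7, 8 :: Float]
+--   r <- TF.runSession $ TF.run (TFC.matMul a b)
+--   print (r :: V.Vector Float)  -- expected: [19,22,43,50]
+-- ```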
+matMul :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +matMul' :: (OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Returns the set of files matching a pattern. +-- +-- Note that this routine only supports wildcard characters in the +-- basename portion of the pattern, not in the directory portion. +matchingFiles :: Tensor v'1 ByteString -> Tensor Build ByteString +matchingFiles' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString + +-- | Copy a tensor setting everything outside a central band in each +-- innermost matrix +-- +-- to zero. +-- +-- The band part is computed as follows: Assume input +-- has k dimensions `[I, J, K, ..., M, N]`, then the output is a +-- tensor with the same shape where +-- +-- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, +-- n]`. +-- +-- The indicator function +-- +-- `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) +-- && (num_upper < 0 || (n-m) <= num_upper)`. +-- +-- For example: +-- +-- ```prettyprint # if input is [[ 0, 1, 2, 3] [-1, 0, 1, 2] +-- [-2, -1, 0, 1] [-3, -2, -1, 0]], +-- +-- tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] [-1, 0, 1, 2] +-- [ 0, -1, 0, 1] [ 0, 0, -1, 0]], +-- +-- tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] [-1, 0, 1, 0] +-- [-2, -1, 0, 1] [ 0, -2, -1, 0]] ``` +-- +-- Useful special cases: +-- +-- ```prettyprint tf.matrix_band_part(input, 0, -1) ==> Upper +-- triangular part. tf.matrix_band_part(input, -1, 0) ==> Lower +-- triangular part. tf.matrix_band_part(input, 0, 0) ==> Diagonal. ``` +matrixBandPart :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t +matrixBandPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor Build t + +-- | Computes the determinant of one ore more square matrices. +-- +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices. The output is a tensor containing the +-- determinants for all input submatrices `[..., :, :]`. +matrixDeterminant :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +matrixDeterminant' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns a batched diagonal tensor with a given batched diagonal +-- values. +-- +-- Given a diagonal, this operation returns a tensor with the +-- diagonal and everything else padded with zeros. The diagonal +-- is computed as follows: +-- +-- Assume diagonal has k dimensions `[I, J, K, ..., +-- N]`, then the output is a tensor of rank `k+1` with dimensions [I, J, +-- K, ..., N, N]` where: +-- +-- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. +-- +-- For example: +-- +-- ```prettyprint # diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]] +-- +-- and diagonal.shape = (2, 4) +-- +-- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, +-- 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, +-- 8]]] +-- +-- which has shape (2, 4, 4) ``` +matrixDiag :: (TensorType t) => Tensor v'1 t -> Tensor Build t +matrixDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns the batched diagonal part of a batched tensor. +-- +-- This operation returns a tensor with the diagonal part of the +-- batched input. 
The diagonal part is computed as +-- follows: +-- +-- Assume input has k dimensions `[I, J, K, ..., M, +-- N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, +-- J, K, ..., min(M, N)]` where: +-- +-- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. +-- +-- The input must be at least a matrix. +-- +-- For example: +-- +-- ```prettyprint # input is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, +-- 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, +-- 8]]] +-- +-- and input.shape = (2, 4, 4) +-- +-- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] +-- +-- which has shape (2, 4) ``` +matrixDiagPart :: (TensorType t) => Tensor v'1 t -> Tensor Build t +matrixDiagPart' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the inverse of one or more square invertible matrices or +-- their +-- +-- adjoints (conjugate transposes). +-- +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices. The output is a tensor of the same +-- shape as the input containing the inverse for all input submatrices +-- `[..., :, :]`. +-- +-- The op uses LU decomposition with partial pivoting to compute the +-- inverses. +-- +-- If a matrix is not invertible there is no guarantee what the op does. +-- It may detect the condition and raise an exception or it may simply +-- return a garbage result. +matrixInverse :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +matrixInverse' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Returns a batched matrix tensor with new batched diagonal values. +-- +-- Given input and diagonal, this operation returns a +-- tensor with the same shape and values as input, except for +-- the main diagonal of the innermost matrices. These will be overwritten +-- by the values in diagonal. +-- +-- The output is computed as follows: +-- +-- Assume input has `k+1` dimensions `[I, J, K, ..., M, N]` and +-- diagonal has k dimensions `[I, J, K, ..., min(M, +-- N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I, +-- J, K, ..., M, N]` where: +-- +--
+-- * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for
+-- `m == n`.
+-- * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for
+-- `m != n`.
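+--
+-- A minimal sketch, under the same qualified imports as the
+-- MatMul example above:
+--
+-- ```haskell
+-- -- Overwrite the main diagonal of [[1,2],[3,4]] with [9,9].
+-- r <- TF.runSession $
+--   TF.run (TFC.matrixSetDiag
+--             (TFO.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float])
+--             (TFO.vector [9, 9 :: Float]))
+-- print (r :: V.Vector Float)  -- expected (row-major): [9,2,3,9]
+-- ```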
+matrixSetDiag :: (TensorType t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +matrixSetDiag' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Solves systems of linear equations. +-- +-- Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices. Rhs is a tensor of shape +-- `[..., M, K]`. The output is a tensor shape `[..., M, K]`. If +-- adjoint is False then each output matrix satisfies +-- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If +-- adjoint is True then each output matrix satisfies +-- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. +matrixSolve :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +matrixSolve' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Solves one or more linear least-squares problems. +-- +-- matrix is a tensor of shape `[..., M, N]` whose inner-most 2 +-- dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape +-- `[..., M, K]`. The output is a tensor shape `[..., N, K]` where each +-- output matrix solves each of the equations matrix[..., :, :] * +-- output[..., :, :] = rhs[..., :, :] in the least squares sense. +-- +-- matrix and right-hand sides in the batch: +-- +-- matrix=\(A in Re^{m times n}\), rhs=\(B in Re^{m +-- times k}\), output=\(X in Re^{n times k}\), +-- l2_regularizer=\(lambda\). +-- +-- If fast is True, then the solution is computed by +-- solving the normal equations using Cholesky decomposition. +-- Specifically, if \(m ge n\) then \(X = (A^T A + lambda I)^{-1} A^T +-- B\), which solves the least-squares problem \(X = mathrm{argmin}_{Z in +-- Re^{n times k} } ||A Z - B||_F^2 + lambda ||Z||_F^2\). If \(m lt n\) +-- then output is computed as \(X = A^T (A A^T + lambda I)^{-1} +-- B\), which (for \(lambda = 0\)) is the minimum-norm solution to the +-- under-determined linear system, i.e. \(X = mathrm{argmin}_{Z in Re^{n +-- times k} } ||Z||_F^2 \), subject to \(A Z = B\). Notice that the fast +-- path is only numerically stable when \(A\) is numerically full rank +-- and has a condition number \(mathrm{cond}(A) lt +-- frac{1}{sqrt{epsilon_{mach} } }\) or\(lambda\) is sufficiently large. +-- +-- If fast is False an algorithm based on the numerically +-- robust complete orthogonal decomposition is used. This computes the +-- minimum-norm least-squares solution, even when \(A\) is rank +-- deficient. This path is typically 6-7 times slower than the fast path. +-- If fast is False then l2_regularizer is +-- ignored. +matrixSolveLs :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t +matrixSolveLs' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 Double -> Tensor Build t + +-- | Solves systems of linear equations with upper or lower triangular +-- matrices by +-- +-- backsubstitution. +-- +-- matrix is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices. If lower is True then +-- the strictly upper triangular part of each inner-most matrix is +-- assumed to be zero and not accessed. If lower is False then +-- the strictly lower triangular part of each inner-most matrix is +-- assumed to be zero and not accessed. rhs is a tensor of shape +-- `[..., M, K]`. +-- +-- The output is a tensor of shape `[..., M, K]`. 
If adjoint is +-- True then the innermost matrices in output` satisfy matrix +-- equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If +-- adjoint is False then the strictly then the innermost +-- matrices in output satisfy matrix equations +-- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. +matrixTriangularSolve :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +matrixTriangularSolve' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the maximum of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +max :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +max' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Performs max pooling on the input. +maxPool :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor Build t +maxPool' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Performs 3D max pooling on the input. +maxPool3D :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +maxPool3D' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes gradients of max pooling function. +maxPool3DGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t +maxPool3DGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 t -> Tensor Build t + +-- | Computes gradients of the maxpooling function. +maxPoolGrad :: (OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +maxPoolGrad' :: (OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t + +-- | Computes gradients of the maxpooling function. +maxPoolGradWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t +maxPoolGradWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor v'3 targmax -> Tensor Build t + +-- | Performs max pooling on the input and outputs both max values and +-- indices. +-- +-- The indices in argmax are flattened, so that a maximum value +-- at position `[b, y, x, c]` becomes flattened index `((b * height + y) +-- * width + x) * channels + c`. +maxPoolWithArgmax :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build targmax) +maxPoolWithArgmax' :: (OneOf '[Int32, Int64] targmax, OneOf '[Word16, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build targmax) + +-- | Returns the max of x and y (i.e. x > y ? 
x : y) element-wise. +-- +--
+-- *NOTE*: Maximum supports broadcasting. More about
+-- broadcasting here
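+--
+-- A minimal sketch, under the same qualified imports as the
+-- MatMul example above; the scalar second argument exercises the
+-- broadcasting described in the note:
+--
+-- ```haskell
+-- -- max(x, 0) element-wise, i.e. clamp negatives to zero.
+-- r <- TF.runSession $
+--   TF.run (TFC.maximum (TFO.vector [-2, 0, 3 :: Float])
+--                       (TFO.scalar 0))
+-- print (r :: V.Vector Float)  -- expected: [0,0,3]
+-- ```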
+maximum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +maximum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the mean of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +mean :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +mean' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Forwards the value of an available tensor from inputs to +-- output. +-- +-- Merge waits for at least one of the tensors in +-- inputs to become available. It is usually combined with +-- Switch to implement branching. +-- +-- Merge forwards the first tensor for become available to +-- output, and sets value_index to its index in +-- inputs. +merge :: (TensorType t) => [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32) +merge' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> (Tensor Build t, Tensor Build Int32) + +-- | Merges summaries. +-- +-- This op creates a `Summary` protocol buffer that contains the +-- union of all the values in the input summaries. +-- +-- When the Op is run, it reports an InvalidArgument error if +-- multiple values in the summaries to merge use the same tag. +mergeSummary :: [Tensor v'1 ByteString] -> Tensor Build ByteString +mergeSummary' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString + +-- | V2 format specific: merges the metadata files of sharded checkpoints. +-- The +-- +-- result is one logical checkpoint, with one physical metadata file and +-- renamed data files. +-- +-- Intended for "grouping" multiple checkpoints in a sharded checkpoint +-- setup. +-- +-- If delete_old_dirs is true, attempts to delete recursively the dirname +-- of each path in the input checkpoint_prefixes. This is useful when +-- those paths are non user-facing temporary locations. +mergeV2Checkpoints :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) +mergeV2Checkpoints' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) + +-- | Computes the minimum of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +min :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +min' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Returns the min of x and y (i.e. x < y ? x : y) element-wise. +-- +--
+-- *NOTE*: Minimum supports broadcasting. More about
+-- broadcasting here
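+--
+-- Composed with Maximum above, this gives an element-wise clamp; a
+-- sketch under the same qualified imports as the MatMul example:
+--
+-- ```haskell
+-- -- Clamp values into [0, 1]: min(max(x, 0), 1).
+-- r <- TF.runSession $
+--   TF.run (TFC.minimum
+--             (TFC.maximum (TFO.vector [-0.5, 0.25, 2 :: Float])
+--                          (TFO.scalar 0))
+--             (TFO.scalar 1))
+-- print (r :: V.Vector Float)  -- expected: [0.0,0.25,1.0]
+-- ```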
+minimum :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +minimum' :: (OneOf '[Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Pads a tensor with mirrored values. +-- +-- This operation pads a input with mirrored values according to +-- the paddings you specify. paddings is an integer +-- tensor with shape `[n, 2]`, where n is the rank of input. For +-- each dimension D of input, `paddings[D, 0]` indicates how +-- many values to add before the contents of input in that +-- dimension, and `paddings[D, 1]` indicates how many values to add after +-- the contents of input in that dimension. Both `paddings[D, +-- 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` +-- (or `input.dim_size(D) - 1`) if copy_border is true (if +-- false, respectively). +-- +-- The padded size of each dimension D of the output is: +-- +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` +-- +-- For example: +-- +-- ```prettyprint # t is [[1, 2, 3], [4, 5, 6]]. # +-- paddings is [[1, 1]], [2, 2]]. # mode is SYMMETRIC. +-- # rank of t is 2. pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, +-- 2] [2, 1, 1, 2, 3, 3, 2] [5, 4, 4, 5, 6, 6, 5] [5, 4, 4, 5, 6, 6, 5]] +-- ``` +mirrorPad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t +mirrorPad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t + +-- | Gradient op for MirrorPad op. This op folds a mirror-padded +-- tensor. +-- +-- This operation folds the padded areas of input by +-- MirrorPad according to the paddings you specify. +-- paddings must be the same as paddings argument given +-- to the corresponding MirrorPad op. +-- +-- The folded size of each dimension D of the output is: +-- +-- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` +-- +-- For example: +-- +-- ```prettyprint # t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. # +-- paddings is [[0, 1]], [0, 1]]. # mode is SYMMETRIC. +-- # rank of t is 2. pad(t, paddings) ==> [[ 1, 5] [11, 28]] +-- ``` +mirrorPadGrad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t +mirrorPadGrad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t + +-- | Returns element-wise remainder of division. +-- +--
+-- *NOTE*: Mod supports broadcasting. More about broadcasting
+-- here
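+--
+-- A minimal sketch, under the same qualified imports as the
+-- MatMul example above (Int32 comes from Data.Int):
+--
+-- ```haskell
+-- r <- TF.runSession $
+--   TF.run (TFC.mod (TFO.vector [7, 8, 9 :: Int32]) (TFO.scalar 3))
+-- print (r :: V.Vector Int32)  -- expected: [1,2,0]
+-- ```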
+mod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +mod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Returns x * y element-wise. +-- +--
+-- *NOTE*: Mul supports broadcasting. More about broadcasting
+-- here
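+--
+-- A minimal sketch, under the same qualified imports as the
+-- MatMul example above; the scalar operand broadcasts across the
+-- vector:
+--
+-- ```haskell
+-- r <- TF.runSession $
+--   TF.run (TFC.mul (TFO.vector [1, 2, 3 :: Float]) (TFO.scalar 10))
+-- print (r :: V.Vector Float)  -- expected: [10,20,30]
+-- ```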
+mul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +mul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Draws samples from a multinomial distribution. +multinomial :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64) +multinomial' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> m' (Tensor Value Int64) + +-- | Creates an empty hash table that uses tensors as the backing store. It +-- uses +-- +-- "open addressing" with quadratic reprobing to resolve collisions. +-- +-- This op creates a mutable hash table, specifying the type of its keys +-- and values. Each value must be a scalar. Data can be inserted into the +-- table using the insert operations. It does not support the +-- initialization operation. +mutableDenseHashTable :: (MonadBuild m', TensorType key_dtype) => DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString) +mutableDenseHashTable' :: (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -> Tensor v'1 key_dtype -> m' (Tensor Ref ByteString) + +-- | Creates an empty hash table. +-- +-- This op creates a mutable hash table, specifying the type of its keys +-- and values. Each value must be a scalar. Data can be inserted into the +-- table using the insert operations. It does not support the +-- initialization operation. +mutableHashTable :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) +mutableHashTable' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) + +-- | Creates an empty hash table. +-- +-- This op creates a mutable hash table, specifying the type of its keys +-- and values. Each value must be a vector. Data can be inserted into the +-- table using the insert operations. It does not support the +-- initialization operation. +mutableHashTableOfTensors :: (MonadBuild m') => DataType -> DataType -> m' (Tensor Ref ByteString) +mutableHashTableOfTensors' :: (MonadBuild m') => OpParams -> DataType -> DataType -> m' (Tensor Ref ByteString) + +-- | Computes numerical negative value element-wise. +-- +-- I.e., \(y = -x\). +neg :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +neg' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Training via negative sampling. +negTrain :: (MonadBuild m') => Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode) +negTrain' :: (MonadBuild m') => OpParams -> Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor v'5 Float -> m' (ControlNode) + +-- | Makes its input available to the next iteration. +nextIteration :: (TensorType t) => Tensor v'1 t -> Tensor Build t +nextIteration' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Does nothing. Only useful as a placeholder for control edges. 
+noOp :: (MonadBuild m') => m' (ControlNode) +noOp' :: (MonadBuild m') => OpParams -> m' (ControlNode) + +-- | Greedily selects a subset of bounding boxes in descending order of +-- score, +-- +-- pruning away boxes that have high intersection-over-union (IOU) +-- overlap with previously selected boxes. Bounding boxes are supplied as +-- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of +-- any diagonal pair of box corners and the coordinates can be provided +-- as normalized (i.e., lying in the interval [0, 1]) or absolute. Note +-- that this algorithm is agnostic to where the origin is in the +-- coordinate system. Note that this algorithm is invariant to orthogonal +-- transformations and translations of the coordinate system; thus +-- translating or reflections of the coordinate system result in the same +-- boxes being selected by the algorithm. +-- +-- The output of this operation is a set of integers indexing into the +-- input collection of bounding boxes representing the selected boxes. +-- The bounding box coordinates corresponding to the selected indices can +-- then be obtained using the `tf.gather operation`. For example: +-- +-- selected_indices = tf.image.non_max_suppression( boxes, scores, +-- max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, +-- selected_indices) +nonMaxSuppression :: Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32 +nonMaxSuppression' :: OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Int32 -> Tensor Build Int32 + +-- | Returns the truth value of (x != y) element-wise. +-- +--
+-- *NOTE*: NotEqual supports broadcasting. More about
+-- broadcasting here
+notEqual :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +notEqual' :: (OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool + +-- | Returns a one-hot tensor. +-- +-- The locations represented by indices in indices take value +-- on_value, while all other locations take value +-- off_value. +-- +-- If the input indices is rank N, the output will have +-- rank `N+1`, The new axis is created at dimension axis +-- (default: the new axis is appended at the end). +-- +-- If indices is a scalar the output shape will be a vector of +-- length depth. +-- +-- If indices is a vector of length features, the +-- output shape will be: ``` features x depth if axis == -1 depth x +-- features if axis == 0 ``` +-- +-- If indices is a matrix (batch) with shape `[batch, +-- features]`, the output shape will be: ``` batch x features x depth if +-- axis == -1 batch x depth x features if axis == 1 depth x batch x +-- features if axis == 0 ``` +-- +-- Examples ========= +-- +-- Suppose that +-- +-- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 +-- axis = -1 ``` +-- +-- Then output is `[4 x 3]`: +-- +-- ```output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) +-- [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ``` +-- +-- Suppose that +-- +-- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 +-- axis = 0 ``` +-- +-- Then output is `[3 x 4]`: +-- +-- ```output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 +-- 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ +-- one_hot(1) ``` Suppose that +-- +-- ``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = +-- 0.0 axis = -1 ``` +-- +-- Then output is `[2 x 2 x 3]`: +-- +-- ```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // +-- one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // +-- one_hot(-1) ]``` +oneHot :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t +oneHot' :: (TensorType t, OneOf '[Int32, Int64, Word8] tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t + +-- | Packs a list of N rank-R tensors into one +-- rank-`(R+1)` tensor. +-- +-- Packs the N tensors in values into a tensor with +-- rank one higher than each tensor in values, by packing them +-- along the axis dimension. Given a list of tensors of shape +-- `(A, B, C)`; +-- +-- if `axis == 0` then the output tensor will have the shape +-- `(N, A, B, C)`. if `axis == 1` then the output tensor will +-- have the shape `(A, N, B, C)`. Etc. +-- +-- For example: +-- +-- ```prettyprint # x is [1, 4] # y is [2, 5] # +-- z is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # +-- Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, +-- 6]] ``` +-- +-- This is the opposite of unpack. +pack :: (TensorType t) => [Tensor v'1 t] -> Tensor Build t +pack' :: (TensorType t) => OpParams -> [Tensor v'1 t] -> Tensor Build t + +-- | Pads a tensor with zeros. +-- +-- This operation pads a input with zeros according to the +-- paddings you specify. paddings is an integer tensor +-- with shape `[Dn, 2]`, where n is the rank of input. 
For each +-- dimension D of input, `paddings[D, 0]` indicates how many +-- zeros to add before the contents of input in that dimension, +-- and `paddings[D, 1]` indicates how many zeros to add after the +-- contents of input in that dimension. +-- +-- The padded size of each dimension D of the output is: +-- +-- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` +-- +-- For example: +-- +-- ```prettyprint # t is [[1, 1], [2, 2]] # paddings is +-- [[1, 1], [2, 2]] # rank of t is 2 pad(t, paddings) ==> +-- [[0, 0, 0, 0, 0, 0] [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, +-- 0, 0]] ``` +pad :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t +pad' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t + +-- | A queue that produces elements in first-in first-out order. +-- +-- Variable-size shapes are allowed by setting the corresponding shape +-- dimensions to 0 in the shape attr. In this case DequeueMany will pad +-- up to the maximum size of any given element in the minibatch. See +-- below for details. +paddingFIFOQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) +paddingFIFOQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) + +-- | A queue that produces elements in first-in first-out order. +-- +-- Variable-size shapes are allowed by setting the corresponding shape +-- dimensions to 0 in the shape attr. In this case DequeueMany will pad +-- up to the maximum size of any given element in the minibatch. See +-- below for details. +paddingFIFOQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) +paddingFIFOQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) + +-- | Concatenates a list of N tensors along the first dimension. +-- +-- The input tensors are all required to have size 1 in the first +-- dimension. +-- +-- For example: +-- +-- ```prettyprint # x is [[1, 4]] # y is [[2, 5]] # +-- z is [[3, 6]] parallel_concat([x, y, z]) => [[1, 4], [2, +-- 5], [3, 6]] # Pack along first dim. ``` +-- +-- The difference between concat and parallel_concat is that concat +-- requires all of the inputs be computed before the operation will begin +-- but doesn't require that the input shapes be known during graph +-- construction. Parallel concat will copy pieces of the input into the +-- output as they become available, in some situations this can provide a +-- performance benefit. +parallelConcat :: (TensorType t) => Shape -> [Tensor v'1 t] -> Tensor Build t +parallelConcat' :: (TensorType t) => OpParams -> Shape -> [Tensor v'1 t] -> Tensor Build t + +-- | Outputs random values from a normal distribution. The parameters may +-- each be a +-- +-- scalar which applies to the entire output, or a vector of length +-- shape[0] which stores the parameters for each batch. +parameterizedTruncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype) +parameterizedTruncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 dtype -> Tensor v'3 dtype -> Tensor v'4 dtype -> Tensor v'5 dtype -> m' (Tensor Value dtype) + +-- | Transforms a vector of brain.Example protos (as strings) into typed +-- tensors. 
+parseExample :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense) +parseExample' :: (OneOfs '[ByteString, Int64, Float] sparse_types, OneOfs '[ByteString, Int64, Float] tdense) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> TensorList (v'5) tdense -> ([Tensor Build Int64], TensorList (Build) sparse_types, [Tensor Build Int64], TensorList (Build) tdense) + +-- | Transforms a scalar brain.SequenceExample proto (as strings) into +-- typed tensors. +parseSingleSequenceExample :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types) +parseSingleSequenceExample' :: (OneOfs '[ByteString, Int64, Float] context_sparse_types, OneOfs '[ByteString, Int64, Float] tcontext_dense, OneOfs '[ByteString, Int64, Float] feature_list_dense_types, OneOfs '[ByteString, Int64, Float] feature_list_sparse_types) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> [Tensor v'3 ByteString] -> [Tensor v'4 ByteString] -> [Tensor v'5 ByteString] -> [Tensor v'6 ByteString] -> TensorList (v'7) tcontext_dense -> Tensor v'8 ByteString -> ([Tensor Build Int64], TensorList (Build) context_sparse_types, [Tensor Build Int64], TensorList (Build) tcontext_dense, [Tensor Build Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Int64], TensorList (Build) feature_list_dense_types) + +-- | Transforms a serialized tensorflow.TensorProto proto into a Tensor. +parseTensor :: (TensorType out_type) => Tensor v'1 ByteString -> Tensor Build out_type +parseTensor' :: (TensorType out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type + +-- | A placeholder op for a value that will be fed into the computation. +-- +-- N.B. This operation will fail with an error if it is executed. It is +-- intended as a way to represent a value that will always be fed, and to +-- provide attrs that enable the fed value to be checked at runtime. +placeholder :: (TensorType dtype) => Tensor Build dtype +placeholder' :: (TensorType dtype) => OpParams -> Tensor Build dtype + +-- | A placeholder op for a value that will be fed into the computation. +-- +-- N.B. This operation will fail with an error if it is executed. It is +-- intended as a way to represent a value that will always be fed, and to +-- provide attrs that enable the fed value to be checked at runtime. +placeholderV2 :: (TensorType dtype) => Shape -> Tensor Build dtype +placeholderV2' :: (TensorType dtype) => OpParams -> Shape -> Tensor Build dtype + +-- | A placeholder op that passes through input when its output is +-- not fed. 
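+--
+-- A small sketch, assuming zeros from TensorFlow.Ops as the default
+-- value (the names here are illustrative only):
+--
+-- ```
+-- -- Yields the fed value when one is provided, else a zero vector.
+-- p = placeholderWithDefault (Shape [3]) (zeros (Shape [3]) :: Tensor Build Float)
+-- ```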
+placeholderWithDefault :: (TensorType dtype) => Shape -> Tensor v'1 dtype -> Tensor Build dtype +placeholderWithDefault' :: (TensorType dtype) => OpParams -> Shape -> Tensor v'1 dtype -> Tensor Build dtype + +-- | Compute the polygamma function \(psi^{(n)}(x)\). +-- +-- The polygamma function is defined as: +-- +-- ``` psi^{(n)}(x) = frac{d^n}{dx^n} psi(x) ``` where \(psi(x)\) is the +-- digamma function. +polygamma :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +polygamma' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the power of one value to another. +-- +-- Given a tensor x and a tensor y, this operation +-- computes \(x^y\) for corresponding elements in x and +-- y. For example: +-- +-- ``` # tensor x is [[2, 2]], [3, 3]] # tensor y is +-- [[8, 16], [2, 3]] tf.pow(x, y) ==> [[256, 65536], [9, 27]] ``` +pow :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +pow' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | An identity op that triggers an error if a gradient is requested. +-- +-- When executed in a graph, this op outputs its input tensor as-is. +-- +-- When building ops to compute gradients, the TensorFlow gradient system +-- will return an error when trying to lookup the gradient of this op, +-- because no gradient must ever be registered for this function. This op +-- exists to prevent subtle bugs from silently returning unimplemented +-- gradients in some corner cases. +preventGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t +preventGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Prints a list of tensors. +-- +-- Passes input through to output and prints `data` +-- when evaluating. +print :: (MonadBuild m', TensorType t, TensorTypes u) => Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t) +print' :: (MonadBuild m', TensorType t, TensorTypes u) => OpParams -> Tensor v'1 t -> TensorList (v'2) u -> m' (Tensor Value t) + +-- | A queue that produces elements sorted by the first component value. +-- +-- Note that the PriorityQueue requires the first component of any +-- element to be a scalar int64, in addition to the other elements +-- declared by component_types. Therefore calls to Enqueue and +-- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will +-- all require (resp. output) one extra entry in their input (resp. +-- output) lists. +priorityQueue :: (MonadBuild m') => m' (Tensor Ref ByteString) +priorityQueue' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) + +-- | A queue that produces elements sorted by the first component value. +-- +-- Note that the PriorityQueue requires the first component of any +-- element to be a scalar int64, in addition to the other elements +-- declared by component_types. Therefore calls to Enqueue and +-- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will +-- all require (resp. output) one extra entry in their input (resp. +-- output) lists. +priorityQueueV2 :: (MonadBuild m') => m' (ResourceHandle) +priorityQueueV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) + +-- | Computes the product of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. 
Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +prod :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +prod' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Computes the QR decompositions of one or more matrices. +-- +-- Computes the QR decomposition of each inner matrix in tensor +-- such that `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` +-- +-- ```prettyprint # a is a tensor. # q is a tensor of orthonormal +-- matrices. # r is a tensor of upper triangular matrices. q, r = qr(a) +-- q_full, r_full = qr(a, full_matrices=True) ``` +qr :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) +qr' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) + +-- | Quantizes then dequantizes a tensor. +-- +-- This op simulates the precision loss from the quantized forward pass +-- by: 1. Quantizing the tensor to fixed point numbers, which should +-- match the target quantization method when it is used in inference. 2. +-- Dequantizing it back to floating point numbers for the following ops, +-- most likely matmul. +-- +-- There are different ways to quantize. This version does not use the +-- full range of the output type, choosing to elide the lowest possible +-- value for symmetry (e.g., output range is -127 to 127, not -128 to 127 +-- for signed 8 bit quantization), so that 0.0 maps to 0. +-- +-- To perform this op, we first find the range of values in our tensor. +-- The range we use is always centered on 0, so we find m such that +-- +--
+--
+-- 1. m = max(abs(input_min), abs(input_max)) if range_given is
+--    true,
+--
+-- 2. m = max(abs(min_elem(input)), abs(max_elem(input)))
+--    otherwise.
+-- +-- Our input tensor range is then [-m, m]. +-- +-- Next, we choose our fixed-point quantization buckets, [min_fixed, +-- max_fixed]. If signed_input is true, this is +-- +--
+--
+-- [min_fixed, max_fixed] =
+-- [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
+-- +-- Otherwise, if signed_input is false, the fixed-point range is +-- +--
+--
+-- [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
+-- +-- From this we compute our scaling factor, s: +-- +-- s = (max_fixed - min_fixed) / (2 * m). +-- +-- Now we can quantize and dequantize the elements of our tensor. An +-- element e is transformed into e': +-- +-- e' = (e * s).round_to_nearest() / s. +-- +-- Note that we have a different number of buckets in the signed vs. +-- unsigned cases. For example, if num_bits == 8, we get 254 buckets in +-- the signed case vs. 255 in the unsigned case. +-- +-- For example, suppose num_bits = 8 and m = 1. Then +-- +--
+--
+-- [min_fixed, max_fixed] = [-127, 127], and s = (127 + 127) / 2
+-- = 127.
+-- +-- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63, +-- 0, 38}, and dequantized to {-1, -63.0127, 0, 38.0127}. +quantizeAndDequantize :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +quantizeAndDequantize' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Convert the quantized input tensor into a lower-precision -- output, using the @@ -336,7 +3465,103 @@ requantize :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, T -- fraction of the possible range. By feeding that output into this -- operator, we can reduce it from 32 bits down to 8 with minimal loss of -- accuracy. -quantizeDownAndShrinkRange :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v1 tinput -> Tensor v2 Float -> Tensor v3 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float) +quantizeDownAndShrinkRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +quantizeDownAndShrinkRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) + +-- | Quantize the input tensor of type float to output +-- tensor of type T. +-- +--
+--
+-- min_range, max_range are scalar floats that specify the range
+-- for the input data. The mode attribute controls exactly which
+-- calculations are used to convert the float values to their
+-- quantized equivalents.
+-- +-- In MIN_COMBINED mode, each value of the tensor will undergo +-- the following: +-- +-- ``` out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) +-- if T == qint8, out[i] -= (range(T) + 1) / 2.0 ``` here `range(T) = +-- numeric_limitsT::max() - numeric_limitsT::min()` +-- +--
+--
+-- *MIN_COMBINED Mode Example*
+-- +-- Assume the input is type float and has a possible range of [0.0, 6.0] +-- and the output type is quint8 ([0, 255]). The min_range and max_range +-- values should be specified as 0.0 and 6.0. Quantizing from float to +-- quint8 will multiply each value of the input by 255/6 and cast to +-- quint8. +-- +-- If the output type was qint8 ([-128, 127]), the operation will +-- additionally subtract each value by 128 prior to casting, so that the +-- range of values aligns with the range of qint8. +-- +-- If the mode is MIN_FIRST, then this approach is used: +-- +-- ``` number_of_steps = 1 << (# of bits in T) range_adjust = +-- number_of_steps / (number_of_steps - 1) range = (range_max - +-- range_min) * range_adjust range_scale = number_of_steps / range +-- quantized = round(input * range_scale) - round(range_min * +-- range_scale) + numeric_limitsT::min() quantized = +-- max(quantized, numeric_limitsT::min()) quantized = +-- min(quantized, numeric_limitsT::max()) ``` +-- +-- The biggest difference between this and MIN_COMBINED is that the +-- minimum range is rounded first, before it's subtracted from the +-- rounded value. With MIN_COMBINED, a small bias is introduced where +-- repeated iterations of quantizing and dequantizing will introduce a +-- larger and larger error. +-- +-- One thing to watch out for is that the operator may choose to adjust +-- the requested minimum and maximum values slightly during the +-- quantization process, so you should always use the output ports as the +-- range for further calculations. For example, if the requested minimum +-- and maximum values are close to equal, they will be separated by a +-- small epsilon value to prevent ill-formed quantized buffers from being +-- created. Otherwise, you can end up with buffers where all the +-- quantized values map to the same float value, which causes problems +-- for operations that have to perform further calculations on them. +quantizeV2 :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) +quantizeV2' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) + +-- | Produces the average pool of the input tensor for quantized types. +quantizedAvgPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) +quantizedAvgPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) + +-- | Quantized Batch normalization. +-- +-- This op is deprecated and will be removed in the future. Prefer +-- `tf.nn.batch_normalization`. 
+quantizedBatchNormWithGlobalNormalization :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +quantizedBatchNormWithGlobalNormalization' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Bool -> Float -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 tinput -> Tensor v'5 Float -> Tensor v'6 Float -> Tensor v'7 tinput -> Tensor v'8 Float -> Tensor v'9 Float -> Tensor v'10 tinput -> Tensor v'11 Float -> Tensor v'12 Float -> Tensor v'13 tinput -> Tensor v'14 Float -> Tensor v'15 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) + +-- | Adds Tensor bias to Tensor input for Quantized +-- types. +-- +-- Broadcasts the values of bias on dimensions 0..N-2 of input. +quantizedBiasAdd :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +quantizedBiasAdd' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) + +-- | Concatenates quantized tensors along one dimension. +quantizedConcat :: (TensorType t) => Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float) +quantizedConcat' :: (TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> (Tensor Build t, Tensor Build Float, Tensor Build Float) + +-- | Computes a 2D convolution given quantized 4D input and filter tensors. +-- +-- The inputs are quantized tensors where the lowest value represents the +-- real number of the associated minimum, and the highest represents the +-- maximum. This means that you can only interpret the quantized output +-- in the same way, by taking the returned minimum and maximum values +-- into account. +quantizedConv2D :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +quantizedConv2D' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] tfilter, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 tfilter -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) + +-- | Quantized Instance normalization. 
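+--
+-- Sketch of a call; qx is an illustrative quantized input and
+-- xMin, xMax give its float range:
+--
+-- ```
+-- (qy, yMin, yMax) = quantizedInstanceNorm qx xMin xMax
+-- ```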
+quantizedInstanceNorm :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) +quantizedInstanceNorm' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- | Perform a quantized matrix multiplication of a by the matrix -- b. @@ -345,109 +3570,439 @@ quantizeDownAndShrinkRange :: (TensorType tinput, OneOf '[Int16, Int32, Word16, -- a (after being transposed if transpose_a is -- non-zero) must match the outer dimension of b (after being -- transposed if transposed_b is non-zero). -quantizedMatMul :: (TensorType t1, OneOf '[Int16, Int32, Word16, Word8] t1, TensorType t2, OneOf '[Int16, Int32, Word16, Word8] t2, TensorType toutput, OneOf '[Int16, Int32, Word16, Word8] toutput) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor v3 Float -> Tensor v4 Float -> Tensor v5 Float -> Tensor v6 Float -> (Tensor Value toutput, Tensor Value Float, Tensor Value Float) +quantizedMatMul :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) +quantizedMatMul' :: (OneOf '[Int16, Int32, Word16, Word8] t1, OneOf '[Int16, Int32, Word16, Word8] t2, OneOf '[Int16, Int32, Word16, Word8] toutput) => OpParams -> Tensor v'1 t1 -> Tensor v'2 t2 -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> Tensor v'6 Float -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) --- | Compute the cumulative product of the tensor x along --- axis. --- --- By default, this op performs an inclusive cumprod, which means that --- the first element of the input is identical to the first element of --- the output: ```prettyprint tf.cumprod([a, b, c]) ==> [a, a * b, a * --- b * c] ``` --- --- By setting the exclusive kwarg to True, an exclusive --- cumprod is performed instead: ```prettyprint tf.cumprod([a, b, c], --- exclusive=True) ==> [0, a, a * b] ``` --- --- By setting the reverse kwarg to True, the cumprod is --- performed in the opposite direction: ```prettyprint tf.cumprod([a, b, --- c], reverse=True) ==> [a * b * c, b * c, c] ``` This is more --- efficient than using separate `tf.reverse` ops. --- --- The reverse and exclusive kwargs can also be combined: --- ```prettyprint tf.cumprod([a, b, c], exclusive=True, reverse=True) --- ==> [b * c, c, 0] ``` -cumprod :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +-- | Produces the max pool of the input tensor for quantized types. +quantizedMaxPool :: (OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) +quantizedMaxPool' :: (OneOf '[Int16, Int32, Word16, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) --- | Compute the cumulative sum of the tensor x along --- axis. 
--- --- By default, this op performs an inclusive cumsum, which means that the --- first element of the input is identical to the first element of the --- output: ```prettyprint tf.cumsum([a, b, c]) ==> [a, a + b, a + b + --- c] ``` --- --- By setting the exclusive kwarg to True, an exclusive --- cumsum is performed instead: ```prettyprint tf.cumsum([a, b, c], --- exclusive=True) ==> [0, a, a + b] ``` --- --- By setting the reverse kwarg to True, the cumsum is --- performed in the opposite direction: ```prettyprint tf.cumsum([a, b, --- c], reverse=True) ==> [a + b + c, b + c, c] ``` This is more --- efficient than using separate `tf.reverse` ops. --- --- The reverse and exclusive kwargs can also be combined: --- ```prettyprint tf.cumsum([a, b, c], exclusive=True, reverse=True) --- ==> [b + c, c, 0] ``` -cumsum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +-- | Computes Quantized Rectified Linear: `max(features, 0)` +quantizedRelu :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +quantizedRelu' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) --- | Compute the pairwise cross product. --- --- a and b must be the same shape; they can either be --- simple 3-element vectors, or any shape where the innermost dimension --- is 3. In the latter case, each pair of corresponding 3-element vectors --- is cross-multiplied independently. -cross :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +-- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` +quantizedRelu6 :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +quantizedRelu6' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) --- | Compute the inverse 3-dimensional discrete Fourier Transform over the --- inner-most --- --- 3 dimensions of input. -iFFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) +-- | Computes Quantized Rectified Linear X: `min(max(features, 0), +-- max_value)` +quantizedReluX :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +quantizedReluX' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) --- | Compute the 3-dimensional discrete Fourier Transform over the --- inner-most 3 +-- | Reshapes a quantized tensor as per the Reshape op. 
-- --- dimensions of input. -fFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) +-- ``` +quantizedReshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) +quantizedReshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor v'3 Float -> Tensor v'4 Float -> (Tensor Build t, Tensor Build Float, Tensor Build Float) --- | Computes gradients of the maxpooling function. -maxPoolGradWithArgmax :: (TensorType targmax, OneOf '[Int32, Int64] targmax, TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 targmax -> Tensor Value t +-- | Closes the given queue. +-- +-- This operation signals that no more elements will be enqueued in the +-- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent +-- Dequeue(Many) operations will continue to succeed if sufficient +-- elements remain in the queue. Subsequent Dequeue(Many) operations that +-- would block will fail immediately. +queueClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) +queueClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) --- | Compute the 2-dimensional discrete Fourier Transform over the --- inner-most +-- | Closes the given queue. -- --- 2 dimensions of input. -fFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) +-- This operation signals that no more elements will be enqueued in the +-- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent +-- Dequeue(Many) operations will continue to succeed if sufficient +-- elements remain in the queue. Subsequent Dequeue(Many) operations that +-- would block will fail immediately. +queueCloseV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) +queueCloseV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) --- | Compute the inverse 1-dimensional discrete Fourier Transform over the --- inner-most +-- | Dequeues a tuple of one or more tensors from the given queue. -- --- dimension of input. -iFFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) +-- This operation has k outputs, where k is the number of components in +-- the tuples stored in the given queue, and output i is the ith +-- component of the dequeued tuple. +-- +-- N.B. If the queue is empty, this operation will block until an element +-- has been dequeued (or timeout_ms elapses, if specified). +queueDequeue :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> m' (TensorList (Value) component_types) +queueDequeue' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> m' (TensorList (Value) component_types) --- | Compute the 1-dimensional discrete Fourier Transform over the --- inner-most +-- | Dequeues n tuples of one or more tensors from the given queue. -- --- dimension of input. -fFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) +-- If the queue is closed and there are fewer than n elements, then an +-- OutOfRange error is returned. +-- +-- This operation concatenates queue-element component tensors along the +-- 0th dimension to make a single component tensor. All of the components +-- in the dequeued tuple will have size n in the 0th dimension. 
+-- +-- This operation has k outputs, where k is the number of components in +-- the tuples stored in the given queue, and output i is the ith +-- component of the dequeued tuple. +-- +-- N.B. If the queue is empty, this operation will block until n elements +-- have been dequeued (or timeout_ms elapses, if specified). +queueDequeueMany :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) +queueDequeueMany' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) --- | Returns the complex conjugate of a complex number. +-- | Dequeues n tuples of one or more tensors from the given queue. -- --- Given a tensor input of complex numbers, this operation --- returns a tensor of complex numbers that are the complex conjugate of --- each element in input. The complex numbers in input --- must be of the form \(a + bj\), where *a* is the real part and *b* is --- the imaginary part. +-- If the queue is closed and there are fewer than n elements, then an +-- OutOfRange error is returned. -- --- The complex conjugate returned by this operation is of the form \(a - --- bj\). +-- This operation concatenates queue-element component tensors along the +-- 0th dimension to make a single component tensor. All of the components +-- in the dequeued tuple will have size n in the 0th dimension. +-- +-- This operation has k outputs, where k is the number of components in +-- the tuples stored in the given queue, and output i is the ith +-- component of the dequeued tuple. +-- +-- N.B. If the queue is empty, this operation will block until n elements +-- have been dequeued (or timeout_ms elapses, if specified). +queueDequeueManyV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) +queueDequeueManyV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) + +-- | Dequeues n tuples of one or more tensors from the given queue. +-- +-- This operation is not supported by all queues. If a queue does not +-- support DequeueUpTo, then an Unimplemented error is returned. +-- +-- If the queue is closed and there are more than 0 but less than n +-- elements remaining, then instead of returning an OutOfRange error like +-- QueueDequeueMany, less than n elements are returned +-- immediately. If the queue is closed and there are 0 elements left in +-- the queue, then an OutOfRange error is returned just like in +-- QueueDequeueMany. Otherwise the behavior is identical to +-- QueueDequeueMany: +-- +-- This operation concatenates queue-element component tensors along the +-- 0th dimension to make a single component tensor. All of the components +-- in the dequeued tuple will have size n in the 0th dimension. +-- +-- This operation has k outputs, where k is the number of components in +-- the tuples stored in the given queue, and output i is the ith +-- component of the dequeued tuple. +queueDequeueUpTo :: (MonadBuild m', TensorTypes component_types) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) +queueDequeueUpTo' :: (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) + +-- | Dequeues n tuples of one or more tensors from the given queue. 
+-- +-- This operation is not supported by all queues. If a queue does not +-- support DequeueUpTo, then an Unimplemented error is returned. +-- +-- If the queue is closed and there are more than 0 but less than n +-- elements remaining, then instead of returning an OutOfRange error like +-- QueueDequeueMany, less than n elements are returned +-- immediately. If the queue is closed and there are 0 elements left in +-- the queue, then an OutOfRange error is returned just like in +-- QueueDequeueMany. Otherwise the behavior is identical to +-- QueueDequeueMany: +-- +-- This operation concatenates queue-element component tensors along the +-- 0th dimension to make a single component tensor. All of the components +-- in the dequeued tuple will have size n in the 0th dimension. +-- +-- This operation has k outputs, where k is the number of components in +-- the tuples stored in the given queue, and output i is the ith +-- component of the dequeued tuple. +queueDequeueUpToV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) +queueDequeueUpToV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> m' (TensorList (Value) component_types) + +-- | Dequeues a tuple of one or more tensors from the given queue. +-- +-- This operation has k outputs, where k is the number of components in +-- the tuples stored in the given queue, and output i is the ith +-- component of the dequeued tuple. +-- +-- N.B. If the queue is empty, this operation will block until an element +-- has been dequeued (or timeout_ms elapses, if specified). +queueDequeueV2 :: (MonadBuild m', TensorTypes component_types) => ResourceHandle -> m' (TensorList (Value) component_types) +queueDequeueV2' :: (MonadBuild m', TensorTypes component_types) => OpParams -> ResourceHandle -> m' (TensorList (Value) component_types) + +-- | Enqueues a tuple of one or more tensors in the given queue. +-- +-- The components input has k elements, which correspond to the +-- components of tuples stored in the given queue. +-- +-- N.B. If the queue is full, this operation will block until the given +-- element has been enqueued (or timeout_ms elapses, if +-- specified). +queueEnqueue :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) +queueEnqueue' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) + +-- | Enqueues zero or more tuples of one or more tensors in the given +-- queue. +-- +-- This operation slices each component tensor along the 0th dimension to +-- make multiple queue elements. All of the tuple components must have +-- the same size in the 0th dimension. +-- +-- The components input has k elements, which correspond to the +-- components of tuples stored in the given queue. +-- +-- N.B. If the queue is full, this operation will block until the given +-- elements have been enqueued (or timeout_ms elapses, if +-- specified). +queueEnqueueMany :: (MonadBuild m', TensorTypes tcomponents) => Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) +queueEnqueueMany' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref ByteString -> TensorList (v'2) tcomponents -> m' (ControlNode) + +-- | Enqueues zero or more tuples of one or more tensors in the given +-- queue. 
+-- +-- This operation slices each component tensor along the 0th dimension to +-- make multiple queue elements. All of the tuple components must have +-- the same size in the 0th dimension. +-- +-- The components input has k elements, which correspond to the +-- components of tuples stored in the given queue. +-- +-- N.B. If the queue is full, this operation will block until the given +-- elements have been enqueued (or timeout_ms elapses, if +-- specified). +queueEnqueueManyV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) +queueEnqueueManyV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) + +-- | Enqueues a tuple of one or more tensors in the given queue. +-- +-- The components input has k elements, which correspond to the +-- components of tuples stored in the given queue. +-- +-- N.B. If the queue is full, this operation will block until the given +-- element has been enqueued (or timeout_ms elapses, if +-- specified). +queueEnqueueV2 :: (MonadBuild m', TensorTypes tcomponents) => ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) +queueEnqueueV2' :: (MonadBuild m', TensorTypes tcomponents) => OpParams -> ResourceHandle -> TensorList (v'2) tcomponents -> m' (ControlNode) + +-- | Computes the number of elements in the given queue. +queueSize :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int32) +queueSize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int32) + +-- | Computes the number of elements in the given queue. +queueSizeV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int32) +queueSizeV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int32) + +-- | Converts one or more images from RGB to HSV. +-- +-- Outputs a tensor of the same shape as the images tensor, +-- containing the HSV value of the pixels. The output is only well +-- defined if the value in images are in `[0,1]`. +-- +-- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, +-- and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A +-- hue of 0 corresponds to pure red, hue 13 is pure green, and 23 +-- is pure blue. +rGBToHSV :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +rGBToHSV' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Randomly crop image. +-- +-- size is a 1-D int64 tensor with 2 elements representing the +-- crop height and width. The values must be non negative. +-- +-- This Op picks a random location in image and crops a +-- height by width rectangle from that location. The +-- random location is picked so the cropped area will fit inside the +-- original image. +randomCrop :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t) +randomCrop' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> m' (Tensor Value t) + +-- | Outputs random values from the Gamma distribution(s) described by +-- alpha. +-- +-- This op uses the algorithm by Marsaglia et al. to acquire samples via +-- transformation-rejection from pairs of uniform and normal random +-- variables. 
See http://dl.acm.org/citation.cfm?id=358414 +randomGamma :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t) +randomGamma' :: (MonadBuild m', OneOf '[Int32, Int64] s, OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 s -> Tensor v'2 t -> m' (Tensor Value t) + +-- | Randomly shuffles a tensor along its first dimension. +-- +-- The tensor is shuffled along dimension 0, such that each `value[j]` is +-- mapped to one and only one `output[i]`. For example, a mapping that +-- might occur for a 3x2 tensor is: +-- +-- ```prettyprint [[1, 2], [[5, 6], [3, 4], ==> [1, 2], [5, 6]] [3, +-- 4]] ``` +randomShuffle :: (MonadBuild m', TensorType t) => Tensor v'1 t -> m' (Tensor Value t) +randomShuffle' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -> m' (Tensor Value t) + +-- | A queue that randomizes the order of elements. +randomShuffleQueue :: (MonadBuild m') => [DataType] -> m' (Tensor Ref ByteString) +randomShuffleQueue' :: (MonadBuild m') => OpParams -> [DataType] -> m' (Tensor Ref ByteString) + +-- | A queue that randomizes the order of elements. +randomShuffleQueueV2 :: (MonadBuild m') => [DataType] -> m' (ResourceHandle) +randomShuffleQueueV2' :: (MonadBuild m') => OpParams -> [DataType] -> m' (ResourceHandle) + +-- | Outputs random values from a normal distribution. +-- +-- The generated values will have mean 0 and standard deviation 1. +randomStandardNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype) +randomStandardNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype) + +-- | Outputs random values from a uniform distribution. +-- +-- The generated values follow a uniform distribution in the range `[0, +-- 1)`. The lower bound 0 is included in the range, while the upper bound +-- 1 is excluded. +randomUniform :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype) +randomUniform' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype) + +-- | Outputs random integers from a uniform distribution. +-- +-- The generated values are uniform integers in the range `[minval, +-- maxval)`. The lower bound minval is included in the range, +-- while the upper bound maxval is excluded. +-- +-- The random integers are slightly biased unless `maxval - minval` is an +-- exact power of two. The bias is small for values of `maxval - minval` +-- significantly smaller than the range of the output (either `2^32` or +-- `2^64`). +randomUniformInt :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout) +randomUniformInt' :: (MonadBuild m', OneOf '[Int32, Int64] tout, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> Tensor v'2 tout -> Tensor v'3 tout -> m' (Tensor Value tout) + +-- | Creates a sequence of numbers. +-- +-- This operation creates a sequence of numbers that begins at +-- start and extends by increments of delta up to but +-- not including limit. 
-- -- For example: -- --- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] --- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] ``` -conj :: (TensorType t, OneOf '[Complex Double, Complex Float] t) => Tensor v1 t -> Tensor Value t +-- ``` # start is 3 # limit is 18 # delta is 3 +-- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ``` +range :: (OneOf '[Int32, Int64, Double, Float] tidx) => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx +range' :: (OneOf '[Int32, Int64, Double, Float] tidx) => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx + +-- | Returns the rank of a tensor. +-- +-- This operation returns an integer representing the rank of +-- input. +-- +-- For example: +-- +-- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], +-- [4, 4, 4]]] # shape of tensor t is [2, 2, 3] rank(t) ==> 3 +-- ``` +-- +--
+--
+-- *Note*: The rank of a tensor is not the same as the rank of a
+-- matrix. The rank of a tensor is the number of indices required to
+-- uniquely select each element of the tensor. Rank is also known as
+-- "order", "degree", or "ndims."
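+--
+-- For instance, with an illustrative t of shape [2, 2, 3]:
+--
+-- ```
+-- r = rank t  -- scalar Tensor Build Int32 holding 3
+-- ```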
+rank :: (TensorType t) => Tensor v'1 t -> Tensor Build Int32 +rank' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build Int32 + +-- | Reads and outputs the entire contents of the input filename. +readFile :: Tensor v'1 ByteString -> Tensor Build ByteString +readFile' :: OpParams -> Tensor v'1 ByteString -> Tensor Build ByteString + +-- | Reads the value of a variable. +-- +-- The tensor returned by this operation is immutable. +-- +-- The value returned by this operation is guaranteed to be influenced by +-- all the writes on which this operation depends directly or indirectly, +-- and to not be influenced by any of the writes which depend directly or +-- indirectly on this operation. +readVariableOp :: (MonadBuild m', TensorType dtype) => ResourceHandle -> m' (Tensor Value dtype) +readVariableOp' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> m' (Tensor Value dtype) + +-- | Returns the number of records this Reader has produced. +-- +-- This is the same as the number of ReaderRead executions that have +-- succeeded. +readerNumRecordsProduced :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) +readerNumRecordsProduced' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) + +-- | Returns the number of records this Reader has produced. +-- +-- This is the same as the number of ReaderRead executions that have +-- succeeded. +readerNumRecordsProducedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64) +readerNumRecordsProducedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64) + +-- | Returns the number of work units this Reader has finished processing. +readerNumWorkUnitsCompleted :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value Int64) +readerNumWorkUnitsCompleted' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value Int64) + +-- | Returns the number of work units this Reader has finished processing. +readerNumWorkUnitsCompletedV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Int64) +readerNumWorkUnitsCompletedV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Int64) + +-- | Returns the next record (key, value pair) produced by a Reader. +-- +-- Will dequeue from the input queue if necessary (e.g. when the Reader +-- needs to start reading from a new file since it has finished with the +-- previous file). +readerRead :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString)) +readerRead' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> m' ((Tensor Value ByteString, Tensor Value ByteString)) + +-- | Returns up to num_records (key, value) pairs produced by a +-- Reader. +-- +-- Will dequeue from the input queue if necessary (e.g. when the Reader +-- needs to start reading from a new file since it has finished with the +-- previous file). It may return less than num_records even +-- before the last batch. +readerReadUpTo :: (MonadBuild m') => Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) +readerReadUpTo' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) + +-- | Returns up to num_records (key, value) pairs produced by a +-- Reader. +-- +-- Will dequeue from the input queue if necessary (e.g. 
when the Reader +-- needs to start reading from a new file since it has finished with the +-- previous file). It may return less than num_records even +-- before the last batch. +readerReadUpToV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) +readerReadUpToV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 Int64 -> m' ((Tensor Value ByteString, Tensor Value ByteString)) + +-- | Returns the next record (key, value pair) produced by a Reader. +-- +-- Will dequeue from the input queue if necessary (e.g. when the Reader +-- needs to start reading from a new file since it has finished with the +-- previous file). +readerReadV2 :: (MonadBuild m') => ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString)) +readerReadV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> ResourceHandle -> m' ((Tensor Value ByteString, Tensor Value ByteString)) + +-- | Restore a Reader to its initial clean state. +readerReset :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) +readerReset' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) + +-- | Restore a Reader to its initial clean state. +readerResetV2 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) +readerResetV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) + +-- | Restore a reader to a previously saved state. +-- +-- Not all Readers support being restored, so this can produce an +-- Unimplemented error. +readerRestoreState :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) +readerRestoreState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 ByteString -> m' (ControlNode) + +-- | Restore a reader to a previously saved state. +-- +-- Not all Readers support being restored, so this can produce an +-- Unimplemented error. +readerRestoreStateV2 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode) +readerRestoreStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 ByteString -> m' (ControlNode) + +-- | Produce a string tensor that encodes the state of a Reader. +-- +-- Not all Readers support being serialized, so this can produce an +-- Unimplemented error. +readerSerializeState :: (MonadBuild m') => Tensor Ref ByteString -> m' (Tensor Value ByteString) +readerSerializeState' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (Tensor Value ByteString) + +-- | Produce a string tensor that encodes the state of a Reader. +-- +-- Not all Readers support being serialized, so this can produce an +-- Unimplemented error. +readerSerializeStateV2 :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value ByteString) +readerSerializeStateV2' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value ByteString) -- | Returns the real part of a complex number. 
-- @@ -461,130 +4016,1127 @@ conj :: (TensorType t, OneOf '[Complex Double, Complex Float] t) => Tensor v1 t -- -- ``` # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j] -- tf.real(input) ==> [-2.25, 3.25] ``` -real :: (TensorType t, OneOf '[Complex Double, Complex Float] t, TensorType tout, OneOf '[Double, Float] tout) => Tensor v1 t -> Tensor Value tout +real :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => Tensor v'1 t -> Tensor Build tout +real' :: (OneOf '[Complex Double, Complex Float] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -> Tensor Build tout --- | Converts two real numbers to a complex number. +-- | Returns x / y element-wise for real types. -- --- Given a tensor real representing the real part of a complex --- number, and a tensor imag representing the imaginary part of a --- complex number, this operation returns complex numbers elementwise of --- the form \(a + bj\), where *a* represents the real part and *b* --- represents the imag part. +-- If x and y are reals, this will return the +-- floating-point division. -- --- The input tensors real and imag must have the same --- shape. +--
+--
+-- *NOTE*: Div supports broadcasting. More about broadcasting
+-- here
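+--
+-- A one-line sketch; x and y are illustrative tensors of the same
+-- element type:
+--
+-- ```
+-- q = realDiv x y  -- element-wise x / y, with broadcasting
+-- ```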
+realDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +realDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes the reciprocal of x element-wise. +-- +-- I.e., \(y = 1 / x\). +reciprocal :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +reciprocal' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the gradient for the inverse of x wrt its input. +-- +-- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is +-- the corresponding input gradient. +reciprocalGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +reciprocalGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Emits randomized records. +recordInput :: (MonadBuild m') => m' (Tensor Value ByteString) +recordInput' :: (MonadBuild m') => OpParams -> m' (Tensor Value ByteString) + +-- | Joins a string Tensor across the given dimensions. +-- +-- Computes the string join across dimensions in the given string Tensor +-- of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by +-- joining the input strings with the given separator (default: empty +-- string). Negative indices are counted backwards from the end, with +-- `-1` being equivalent to `n - 1`. -- -- For example: -- --- ``` # tensor real is [2.25, 3.25] # tensor imag is --- [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + --- 5.75j]] ``` -complex :: (TensorType t, OneOf '[Double, Float] t, TensorType tout, OneOf '[Complex Double, Complex Float] tout) => Tensor v1 t -> Tensor v2 t -> Tensor Value tout +-- ``` # tensor a is [["a", "b"], ["c", "d"]] tf.reduce_join(a, +-- 0) ==> ["ac", "bd"] tf.reduce_join(a, 1) ==> ["ab", "cd"] +-- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] +-- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] +-- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] +-- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] +-- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] +-- tf.reduce_join(a, [0, 1]) ==> ["acbd"] tf.reduce_join(a, [1, 0]) +-- ==> ["abcd"] tf.reduce_join(a, []) ==> ["abcd"] ``` +reduceJoin :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString +reduceJoin' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString --- | Creates a sequence of numbers. +-- | Creates or finds a child frame, and makes `data` available to the +-- child frame. -- --- This operation creates a sequence of numbers that begins at --- start and extends by increments of delta up to but --- not including limit. +-- The unique frame_name is used by the Executor to +-- identify frames. If is_constant is true, output is a +-- constant in the child frame; otherwise it may be changed in the child +-- frame. At most parallel_iterations iterations are run in +-- parallel in the child frame. 
+refEnter :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) +refEnter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) + +-- | Exits the current frame to its parent frame. +-- +-- Exit makes its input `data` available to the parent frame. +refExit :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) +refExit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) + +-- | Return the same ref tensor as the input ref tensor. +refIdentity :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) +refIdentity' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) + +-- | Forwards the value of an available tensor from inputs to +-- output. +-- +-- Merge waits for at least one of the tensors in +-- inputs to become available. It is usually combined with +-- Switch to implement branching. +-- +-- Merge forwards the first tensor for become available to +-- output, and sets value_index to its index in +-- inputs. +refMerge :: (MonadBuild m', TensorType t) => [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32)) +refMerge' :: (MonadBuild m', TensorType t) => OpParams -> [Tensor Ref t] -> m' ((Tensor Ref t, Tensor Value Int32)) + +-- | Makes its input available to the next iteration. +refNextIteration :: (MonadBuild m', TensorType t) => Tensor Ref t -> m' (Tensor Ref t) +refNextIteration' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> m' (Tensor Ref t) + +-- | Forwards the indexth element of inputs to +-- output. +refSelect :: (MonadBuild m', TensorType t) => Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t) +refSelect' :: (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Int32 -> [Tensor Ref t] -> m' (Tensor Ref t) + +-- | Forwards the ref tensor `data` to the output port determined by +-- pred. +-- +-- If pred is true, the `data` input is forwarded to +-- output_true. Otherwise, the data goes to +-- output_false. +-- +-- See also Switch and Merge. +refSwitch :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t)) +refSwitch' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 Bool -> m' ((Tensor Ref t, Tensor Ref t)) + +-- | Computes rectified linear: `max(features, 0)`. +relu :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +relu' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes rectified linear 6: `min(max(features, 0), 6)`. +relu6 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +relu6' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes rectified linear 6 gradients for a Relu6 operation. +relu6Grad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +relu6Grad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes rectified linear gradients for a Relu operation. 
+reluGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +reluGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Given a quantized tensor described by (input, input_min, input_max), +-- outputs a +-- +-- range that covers the actual values present in that tensor. This op is +-- typically used to produce the requested_output_min and +-- requested_output_max for Requantize. +requantizationRange :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float) +requantizationRange' :: (OneOf '[Int16, Int32, Word16, Word8] tinput) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> (Tensor Build Float, Tensor Build Float) + +-- | Convert the quantized input tensor into a lower-precision +-- output, using the +-- +-- output range specified with requested_output_min and +-- requested_output_max. +-- +--
+-- * input_min, input_max are scalar floats that specify the range for
+-- the float interpretation of the input data. For example, if
+-- input_min is -1.0f and input_max is 1.0f, and we are dealing with
+-- quint16 quantized data, then a 0 value in the 16-bit data should be
+-- interpreted as -1.0f, and a 65535 means 1.0f.
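+--
+-- A minimal Haskell sketch (an editorial illustration; it assumes the
+-- scalar and vector helpers from TensorFlow.Ops, and picks the
+-- Int32/Word8 pair to stand in for qint32/quint8):
+--
+-- ```
+-- import qualified Data.Vector as V
+-- import Data.Int (Int32)
+-- import Data.Word (Word8)
+-- import qualified TensorFlow.Core as TF
+-- import qualified TensorFlow.GenOps.Core as TF (requantize)
+-- import qualified TensorFlow.Ops as TF (scalar, vector)
+--
+-- main :: IO ()
+-- main = do
+--   (out, outMin, outMax) <- TF.runSession $ do
+--     let input  = TF.vector [0, 1000, 2000 :: Int32]
+--         inMin  = TF.scalar (-10 :: Float)
+--         inMax  = TF.scalar (10 :: Float)
+--         reqMin = TF.scalar (-1 :: Float)   -- requested output range
+--         reqMax = TF.scalar (1 :: Float)
+--     TF.run (TF.requantize input inMin inMax reqMin reqMax)
+--   print (out :: V.Vector Word8)
+--   print (outMin :: TF.Scalar Float, outMax :: TF.Scalar Float)
+-- ```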
+requantize :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) +requantize' :: (OneOf '[Int16, Int32, Word16, Word8] tinput, OneOf '[Int16, Int32, Word16, Word8] out_type) => OpParams -> Tensor v'1 tinput -> Tensor v'2 Float -> Tensor v'3 Float -> Tensor v'4 Float -> Tensor v'5 Float -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) + +-- | Reshapes a tensor. +-- +-- Given tensor, this operation returns a tensor that has the +-- same values as tensor with shape shape. +-- +-- If one component of shape is the special value -1, the size of +-- that dimension is computed so that the total size remains constant. In +-- particular, a shape of `[-1]` flattens into 1-D. At most one +-- component of shape can be -1. +-- +-- If shape is 1-D or higher, then the operation returns a tensor +-- with shape shape filled with the values of tensor. In +-- this case, the number of elements implied by shape must be the +-- same as the number of elements in tensor. -- -- For example: -- --- ``` # start is 3 # limit is 18 # delta is 3 --- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ``` -range :: (TensorType tidx, OneOf '[Int32, Int64, Double, Float] tidx) => Tensor v1 tidx -> Tensor v2 tidx -> Tensor v3 tidx -> Tensor Value tidx +-- ```prettyprint # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # +-- tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], +-- [4, 5, 6], [7, 8, 9]] +-- +-- # tensor t is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor +-- t has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, +-- 2], [3, 3, 4, 4]] +-- +-- # tensor t is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, +-- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor t has shape [3, +-- 2, 3] # pass '[-1]' to flatten t reshape(t, [-1]) ==> [1, +-- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] +-- +-- # -1 can also be used to infer the shape +-- +-- # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, +-- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: +-- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, +-- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3]) +-- ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, +-- 6, 6]]] +-- +-- # tensor t is [7] # shape `[]` reshapes to a scalar +-- reshape(t, []) ==> 7 ``` +reshape :: (TensorType t, OneOf '[Int32, Int64] tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t +reshape' :: (TensorType t, OneOf '[Int32, Int64] tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t --- | Computes the "logical or" of elements across dimensions of a tensor. +-- | Resize images to size using area interpolation. -- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -any :: (TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 Bool -> Tensor v2 tidx -> Tensor Value Bool +-- Input images can be of different types but output images are always +-- float. 
+resizeArea :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float +resizeArea' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float --- | Computes the mean along sparse segments of a tensor. +-- | Resize images to size using bicubic interpolation. -- --- Read the section on Segmentation for an explanation of --- segments. --- --- Like SegmentMean, but segment_ids can have rank less --- than `data`'s first dimension, selecting a subset of dimension 0, --- specified by indices. -sparseSegmentMean :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor Value t +-- Input images can be of different types but output images are always +-- float. +resizeBicubic :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float +resizeBicubic' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float --- | Computes the sum along sparse segments of a tensor. +-- | Resize images to size using bilinear interpolation. -- --- Read the section on Segmentation for an explanation of --- segments. +-- Input images can be of different types but output images are always +-- float. +resizeBilinear :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float +resizeBilinear' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build Float + +-- | Computes the gradient of bilinear interpolation. +resizeBilinearGrad :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t +resizeBilinearGrad' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -> Tensor v'2 t -> Tensor Build t + +-- | Resize images to size using nearest neighbor +-- interpolation. +resizeNearestNeighbor :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t +resizeNearestNeighbor' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t + +-- | Computes the gradient of nearest neighbor interpolation. +resizeNearestNeighborGrad :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t +resizeNearestNeighborGrad' :: (OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t + +-- | Update '*var' according to the adadelta scheme. -- --- Like SegmentSum, but segment_ids can have rank less --- than `data`'s first dimension, selecting a subset of dimension 0, --- specified by indices. 
+-- accum = rho() * accum + (1 - rho()) * grad.square(); update = +-- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; +-- update_accum = rho() * update_accum + (1 - rho()) * update.square(); +-- var -= update; +resourceApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode) +resourceApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> m' (ControlNode) + +-- | Update '*var' according to the adagrad scheme. +-- +-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) +resourceApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode) +resourceApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> m' (ControlNode) + +-- | Update '*var' according to the proximal adagrad scheme. +resourceApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode) +resourceApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 Int64 -> m' (ControlNode) + +-- | Update '*var' according to the Adam algorithm. +-- +-- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- +-- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - +-- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + +-- epsilon) +resourceApplyAdam :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode) +resourceApplyAdam' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 t -> m' (ControlNode) + +-- | Update '*var' according to the centered RMSProp algorithm. +-- +-- The centered RMSProp algorithm uses an estimate of the centered second +-- moment (i.e., the variance) for normalization, as opposed to regular +-- RMSProp, which uses the (uncentered) second moment. This often helps +-- with training, but is slightly more expensive in terms of computation +-- and memory. 
+-- +-- Note that in dense implementation of this algorithm, mg, ms, and mom +-- will update even if the grad is zero, but in this sparse +-- implementation, mg, ms, and mom will not update in iterations during +-- which the grad is zero. +-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 +-- mean_grad = decay * mean_grad + (1-decay) * gradient +-- +-- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - +-- mean_grad ** 2) +-- +-- mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + +-- (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / +-- sqrt(ms - mg * mg + epsilon) var <- var - mom +resourceApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) +resourceApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) + +-- | Update '*var' according to the Ftrl-proximal scheme. +-- +-- accum_new = accum + grad * grad linear += grad + +-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 +-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - +-- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new +resourceApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) +resourceApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) + +-- | Update '*var' by subtracting alpha * delta from it. +resourceApplyGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode) +resourceApplyGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> m' (ControlNode) + +-- | Update '*var' according to the momentum scheme. Set use_nesterov = +-- True if you +-- +-- want to use Nesterov momentum. +-- +-- accum = accum * momentum + grad var -= lr * accum +resourceApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) +resourceApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) + +-- | Update '*var' and '*accum' according to FOBOS with Adagrad learning +-- rate. 
+-- +-- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var +-- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} +resourceApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode) +resourceApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> m' (ControlNode) + +-- | Update '*var' as FOBOS algorithm with fixed learning rate. +-- +-- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * +-- max{|prox_v|-alpha*l1,0} +resourceApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) +resourceApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> m' (ControlNode) + +-- | Update '*var' according to the RMSProp algorithm. +-- +-- Note that in dense implementation of this algorithm, ms and mom will +-- update even if the grad is zero, but in this sparse implementation, ms +-- and mom will not update in iterations during which the grad is zero. +-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +resourceApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) +resourceApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> m' (ControlNode) + +-- | Gather slices from the variable pointed to by resource +-- according to indices. +-- +-- indices must be an integer tensor of any dimension (usually +-- 0-D or 1-D). Produces an output tensor with shape `indices.shape + +-- params.shape[1:]` where: +-- +-- ```python # Scalar indices output[:, ..., :] = params[indices, :, ... +-- :] +-- +-- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] +-- +-- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, +-- ..., j], :, ..., :] ``` +resourceGather :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype) +resourceGather' :: (MonadBuild m', TensorType dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> m' (Tensor Value dtype) + +-- | Adds sparse updates to the variable referenced by resource. +-- +-- This operation computes +-- +-- # Scalar indices ref[indices, ...] += updates[...] 
+-- +-- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- += updates[i, ..., j, ...] +-- +-- Duplicate entries are handled correctly: if multiple indices +-- reference the same location, their contributions add. +-- +-- Requires `updates.shape = indices.shape + ref.shape[1:]`. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/ScatterAdd.png" alt /div +resourceScatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode) +resourceScatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 tindices -> Tensor v'3 dtype -> m' (ControlNode) + +-- | var: Should be from a Variable(). +resourceSparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode) +resourceSparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (ControlNode) + +-- | Update relevant entries in '*var' and '*accum' according to the +-- adagrad scheme. +-- +-- That is for rows we have grad for, we update var and accum as follows: +-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) +resourceSparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode) +resourceSparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (ControlNode) + +-- | Update entries in '*var' and '*accum' according to the proximal +-- adagrad scheme. +resourceSparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode) +resourceSparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (ControlNode) + +-- | Update '*var' according to the centered RMSProp algorithm. 
+-- +-- The centered RMSProp algorithm uses an estimate of the centered second +-- moment (i.e., the variance) for normalization, as opposed to regular +-- RMSProp, which uses the (uncentered) second moment. This often helps +-- with training, but is slightly more expensive in terms of computation +-- and memory. +-- +-- Note that in dense implementation of this algorithm, mg, ms, and mom +-- will update even if the grad is zero, but in this sparse +-- implementation, mg, ms, and mom will not update in iterations during +-- which the grad is zero. +-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 +-- mean_grad = decay * mean_grad + (1-decay) * gradient Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** +-- 2) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +resourceSparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode) +resourceSparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (ControlNode) + +-- | Update relevant entries in '*var' according to the Ftrl-proximal +-- scheme. +-- +-- That is for rows we have grad for, we update var, accum and linear as +-- follows: accum_new = accum + grad * grad linear += grad + +-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 +-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - +-- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new +resourceSparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) +resourceSparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (ControlNode) + +-- | Update relevant entries in '*var' and '*accum' according to the +-- momentum scheme. +-- +-- Set use_nesterov = True if you want to use Nesterov momentum. 
+-- +-- That is for rows we have grad for, we update var and accum as follows: +-- +-- accum = accum * momentum + grad var -= lr * accum +resourceSparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode) +resourceSparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (ControlNode) + +-- | Sparse update entries in '*var' and '*accum' according to FOBOS +-- algorithm. +-- +-- That is for rows we have grad for, we update var and accum as follows: +-- accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / +-- sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} +resourceSparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode) +resourceSparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (ControlNode) + +-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate. +-- +-- That is for rows we have grad for, we update var as follows: prox_v = +-- var - alpha * grad var = sign(prox_v)/(1+alpha*l2) * +-- max{|prox_v|-alpha*l1,0} +resourceSparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode) +resourceSparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (ControlNode) + +-- | Update '*var' according to the RMSProp algorithm. +-- +-- Note that in dense implementation of this algorithm, ms and mom will +-- update even if the grad is zero, but in this sparse implementation, ms +-- and mom will not update in iterations during which the grad is zero. 
+-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +resourceSparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode) +resourceSparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> ResourceHandle -> ResourceHandle -> ResourceHandle -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (ControlNode) + +-- | Restores a tensor from checkpoint files. +-- +-- Reads a tensor stored in one or several files. If there are several +-- files (for instance because a tensor was saved as slices), +-- file_pattern may contain wildcard symbols (* and +-- ?) in the filename portion only, not in the directory +-- portion. +-- +-- If a file_pattern matches several files, +-- preferred_shard can be used to hint in which file the +-- requested tensor is likely to be found. This op will first open the +-- file at index preferred_shard in the list of matching files +-- and try to restore tensors from that file. Only if some tensors or +-- tensor slices are not found in that first file, then the Op opens all +-- the files. Setting preferred_shard to match the value passed +-- as the shard input of a matching Save Op may speed +-- up Restore. This attribute only affects performance, not correctness. +-- The default value -1 means files are processed in order. +-- +-- See also RestoreSlice. +restore :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt +restore' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor Build dt + +-- | Restores a tensor from checkpoint files. +-- +-- This is like Restore except that restored tensor can be +-- listed as filling only a slice of a larger tensor. +-- shape_and_slice specifies the shape of the larger tensor and +-- the slice that the restored tensor covers. +-- +-- The shape_and_slice input has the same format as the elements +-- of the shapes_and_slices input of the SaveSlices op. +restoreSlice :: (TensorType dt) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt +restoreSlice' :: (TensorType dt) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> Tensor Build dt + +-- | Restores tensors from a V2 checkpoint. +-- +-- For backward compatibility with the V1 format, this Op currently +-- allows restoring from a V1 checkpoint as well: - This Op first +-- attempts to find the V2 index file pointed to by "prefix", and if +-- found proceed to read it as a V2 checkpoint; - Otherwise the V1 read +-- path is invoked. Relying on this behavior is not recommended, as the +-- ability to fall back to read V1 might be deprecated and eventually +-- removed. +-- +-- By default, restores the named tensors in full. 
If the caller wishes +-- to restore specific slices of stored tensors, "shape_and_slices" +-- should be non-empty strings and correspondingly well-formed. +-- +-- Callers must ensure all the named tensors are indeed stored in the +-- checkpoint. +restoreV2 :: (TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes +restoreV2' :: (TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (Build) dtypes + +-- | Reverses specific dimensions of a tensor. +-- +-- Given a tensor, and a bool tensor dims +-- representing the dimensions of tensor, this operation +-- reverses each dimension i of tensor where `dims[i]` is +-- True. +-- +-- tensor can have up to 8 dimensions. The number of dimensions +-- of tensor must equal the number of elements in dims. +-- In other words: +-- +-- `rank(tensor) = size(dims)` -- -- For example: -- --- ```prettyprint c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) +-- ```prettyprint # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, +-- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], # +-- [20, 21, 22, 23]]]] # tensor t shape is [1, 2, 3, 4] -- --- # Select two rows, one segment. tf.sparse_segment_sum(c, --- tf.constant([0, 1]), tf.constant([0, 0])) ==> [[0 0 0 0]] +-- # dims is [False, False, False, True] reverse(t, dims) ==> +-- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12], +-- [19, 18, 17, 16], [23, 22, 21, 20]]]] -- --- # Select two rows, two segment. tf.sparse_segment_sum(c, --- tf.constant([0, 1]), tf.constant([0, 1])) ==> [[ 1 2 3 4] [-1 -2 -3 --- -4]] +-- # dims is [False, True, False, False] reverse(t, dims) ==> +-- [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, 1, 2, +-- 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]] -- --- # Select all rows, two segments. tf.sparse_segment_sum(c, --- tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) ==> [[0 0 0 0] [5 6 --- 7 8]] --- --- # Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) --- ``` -sparseSegmentSum :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor Value t +-- # dims is [False, False, True, False] reverse(t, dims) ==> +-- [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, 23], [16, +-- 17, 18, 19], [12, 13, 14, 15]]]] ``` +reverse :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t +reverse' :: (OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> Tensor Build t --- | Computes the sum along segments of a tensor. +-- | Reverses variable length slices. +-- +-- This op first slices input along the dimension +-- batch_dim, and for each slice i, reverses the first +-- `seq_lengths[i]` elements along the dimension seq_dim. +-- +-- The elements of seq_lengths must obey `seq_lengths[i] < +-- input.dims[seq_dim]`, and seq_lengths must be a vector of +-- length `input.dims[batch_dim]`. +-- +-- The output slice i along dimension batch_dim is then +-- given by input slice i, with the first `seq_lengths[i]` +-- slices along dimension seq_dim reversed. +-- +-- For example: +-- +-- ```prettyprint # Given this: batch_dim = 0 seq_dim = 1 input.dims = +-- (4, 8, ...) 
seq_lengths = [7, 2, 3, 5] +-- +-- # then slices of input are reversed on seq_dim, but only up to +-- seq_lengths: output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] +-- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] output[2, 0:3, :, +-- ...] = input[2, 3:0:-1, :, ...] output[3, 0:5, :, ...] = input[3, +-- 5:0:-1, :, ...] +-- +-- # while entries past seq_lens are copied through: output[0, 7:, :, +-- ...] = input[0, 7:, :, ...] output[1, 2:, :, ...] = input[1, 2:, :, +-- ...] output[2, 3:, :, ...] = input[2, 3:, :, ...] output[3, 2:, :, +-- ...] = input[3, 2:, :, ...] ``` +-- +-- In contrast, if: +-- +-- ```prettyprint # Given this: batch_dim = 2 seq_dim = 0 input.dims = +-- (8, ?, 4, ...) seq_lengths = [7, 2, 3, 5] +-- +-- # then slices of input are reversed on seq_dim, but only up to +-- seq_lengths: output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] +-- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] output[0:3, :, +-- 2, :, ...] = input[3:0:-1, :, 2, :, ...] output[0:5, :, 3, :, ...] = +-- input[5:0:-1, :, 3, :, ...] +-- +-- # while entries past seq_lens are copied through: output[7:, :, 0, :, +-- ...] = input[7:, :, 0, :, ...] output[2:, :, 1, :, ...] = input[2:, :, +-- 1, :, ...] output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] +-- output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] ``` +reverseSequence :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t +reverseSequence' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor Build t + +-- | Reverses specific dimensions of a tensor. +-- +-- NOTE `tf.reverse` has now changed behavior in preparation for 1.0. +-- `tf.reverse_v2` is currently an alias that will be deprecated before +-- TF 1.0. +-- +-- Given a tensor, and a int32 tensor axis +-- representing the set of dimensions of tensor to reverse. This +-- operation reverses each dimension i for which there exists +-- j s.t. `axis[j] == i`. +-- +-- tensor can have up to 8 dimensions. The number of dimensions +-- specified in axis may be 0 or more entries. If an index is +-- specified more than once, a InvalidArgument error is raised. +-- +-- For example: +-- +-- ```prettyprint # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, +-- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], # +-- [20, 21, 22, 23]]]] # tensor t shape is [1, 2, 3, 4] +-- +-- # dims is [3] or dims is -1 reverse(t, dims) ==> +-- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12], +-- [19, 18, 17, 16], [23, 22, 21, 20]]]] +-- +-- # dims is '[1]' (or dims is '[-3]') reverse(t, dims) +-- ==> [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, +-- 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]] +-- +-- # dims is '[2]' (or dims is '[-2]') reverse(t, dims) +-- ==> [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, +-- 23], [16, 17, 18, 19], [12, 13, 14, 15]]]] ``` +reverseV2 :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +reverseV2' :: (OneOf '[Int32, Int64] tidx, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Returns element-wise integer closest to x. +-- +-- If the result is midway between two representable values, the even +-- representable is chosen. 
For example: +-- +-- ``` rint(-1.5) ==> -2.0 rint(0.5000001) ==> 1.0 rint([-1.7, +-- -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., +-- 2.] ``` +rint :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +rint' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Rounds the values of a tensor to the nearest integer, element-wise. +-- +-- Rounds half to even. Also known as bankers rounding. If you want to +-- round according to the current system rounding mode use std::cint. +round :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +round' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes reciprocal of square root of x element-wise. +-- +-- I.e., \(y = 1 / sqrt{x}\). +rsqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +rsqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the gradient for the rsqrt of x wrt its input. +-- +-- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and +-- dy is the corresponding input gradient. +rsqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +rsqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Generate a single randomly distorted bounding box for an image. +-- +-- Bounding box annotations are often supplied in addition to +-- ground-truth labels in image recognition or object localization tasks. +-- A common technique for training such a system is to randomly distort +-- an image while preserving its content, i.e. *data augmentation*. This +-- Op outputs a randomly distorted localization of an object, i.e. +-- bounding box, given an image_size, bounding_boxes +-- and a series of constraints. +-- +-- The output of this Op is a single bounding box that may be used to +-- crop the original image. The output is returned as 3 tensors: +-- begin, size and bboxes. The first 2 tensors +-- can be fed directly into `tf.slice` to crop the image. The latter may +-- be supplied to `tf.image.draw_bounding_boxes` to visualize what the +-- bounding box looks like. +-- +-- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, +-- x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` +-- relative to the width and height of the underlying image. +-- +-- For example, +-- +-- ```python # Generate a single distorted bounding box. begin, size, +-- bbox_for_draw = tf.image.sample_distorted_bounding_box( +-- tf.shape(image), bounding_boxes=bounding_boxes) +-- +-- # Draw the bounding box in an image summary. image_with_box = +-- tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) +-- tf.image_summary(images_with_box, image_with_box) +-- +-- # Employ the bounding box to distort the image. distorted_image = +-- tf.slice(image, begin, size) ``` +-- +-- Note that if no bounding box information is available, setting +-- `use_image_if_no_bounding_boxes = true` will assume there is a single +-- implicit bounding box covering the whole image. If +-- use_image_if_no_bounding_boxes is false and no bounding boxes +-- are supplied, an error is raised. 
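+--
+-- A rough Haskell sketch of the same flow (an editorial illustration;
+-- it assumes the constant helper from TensorFlow.Ops and
+-- run/runSession from TensorFlow.Core):
+--
+-- ```
+-- import qualified Data.Vector as V
+-- import Data.Int (Int32)
+-- import qualified TensorFlow.Core as TF
+-- import qualified TensorFlow.GenOps.Core as TF (sampleDistortedBoundingBox)
+-- import qualified TensorFlow.Ops as TF (constant)
+--
+-- main :: IO ()
+-- main = do
+--   (begin, size) <- TF.runSession $ do
+--     let imageShape = TF.constant (TF.Shape [3]) [480, 640, 3 :: Int32]
+--         -- One ground-truth box, [y_min, x_min, y_max, x_max].
+--         boxes = TF.constant (TF.Shape [1, 1, 4]) [0.1, 0.1, 0.9, 0.9 :: Float]
+--     (beginT, sizeT, _bboxes) <- TF.sampleDistortedBoundingBox imageShape boxes
+--     TF.run (beginT, sizeT)
+--   -- begin and size can be fed to a slice of the original image.
+--   print (begin :: V.Vector Int32)
+--   print (size :: V.Vector Int32)
+-- ```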
+sampleDistortedBoundingBox :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) +sampleDistortedBoundingBox' :: (MonadBuild m', OneOf '[Int16, Int32, Int64, Int8, Word8] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Float -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) + +-- | Saves the input tensors to disk. +-- +-- The size of tensor_names must match the number of tensors in +-- `data`. `data[i]` is written to filename with name +-- `tensor_names[i]`. +-- +-- See also SaveSlices. +save :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode) +save' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> TensorList (v'3) t -> m' (ControlNode) + +-- | Saves input tensors slices to disk. +-- +-- This is like Save except that tensors can be listed in the +-- saved file as being a slice of a larger tensor. +-- shapes_and_slices specifies the shape of the larger tensor +-- and the slice that this tensor covers. shapes_and_slices must +-- have as many elements as tensor_names. +-- +-- Elements of the shapes_and_slices input must either be: +-- +--
+-- * The empty string, in which case the corresponding tensor is saved
+-- normally.
+--
+-- * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
+-- dimI are the dimensions of the larger tensor and `slice-spec`
+-- specifies what part is covered by the tensor to save.
+--
+-- `slice-spec` itself is a :-separated list:
+-- `slice0:slice1:...:sliceN-1` where each sliceI is either:
+--
+-- * The string - meaning that the slice covers all indices of this
+-- dimension.
+--
+-- * `start,length` where start and length are integers. In that case
+-- the slice covers length indices starting at start.
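+--
+-- For instance, for a larger tensor of shape `[4 5]`, an entry of
+-- `"4 5 0,2:-"` saves a slice covering the first two indices of
+-- dimension 0 and all indices of dimension 1.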
+-- +-- See also Save. +saveSlices :: (MonadBuild m', TensorTypes t) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode) +saveSlices' :: (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) t -> m' (ControlNode) + +-- | Saves tensors in V2 checkpoint format. +-- +-- By default, saves the named tensors in full. If the caller wishes to +-- save specific slices of full tensors, "shape_and_slices" should be +-- non-empty strings and correspondingly well-formed. +saveV2 :: (MonadBuild m', TensorTypes dtypes) => Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode) +saveV2' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> Tensor v'3 ByteString -> TensorList (v'4) dtypes -> m' (ControlNode) + +-- | Outputs a Summary protocol buffer with scalar values. +-- +-- The input tags and values must have the same shape. +-- The generated summary has a summary value for each tag-value pair in +-- tags and values. +scalarSummary :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString +scalarSummary' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor Build ByteString + +-- | Adds sparse updates to a variable reference. +-- +-- This operation computes +-- +-- # Scalar indices ref[indices, ...] += updates[...] +-- +-- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- += updates[i, ..., j, ...] +-- +-- This operation outputs ref after the update is done. This +-- makes it easier to chain operations that need to use the reset value. +-- +-- Duplicate entries are handled correctly: if multiple indices +-- reference the same location, their contributions add. +-- +-- Requires `updates.shape = indices.shape + ref.shape[1:]`. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/ScatterAdd.png" alt /div +scatterAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) +scatterAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) + +-- | Divides a variable reference by sparse updates. +-- +-- This operation computes +-- +-- # Scalar indices ref[indices, ...] /= updates[...] +-- +-- # Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- /= updates[i, ..., j, ...] +-- +-- This operation outputs ref after the update is done. This +-- makes it easier to chain operations that need to use the reset value. +-- +-- Duplicate entries are handled correctly: if multiple indices +-- reference the same location, their contributions divide. +-- +-- Requires `updates.shape = indices.shape + ref.shape[1:]`. 
+scatterDiv :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) +scatterDiv' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) + +-- | Multiplies sparse updates into a variable reference. +-- +-- This operation computes +-- +-- # Scalar indices ref[indices, ...] *= updates[...] +-- +-- # Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...] +-- +-- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] +-- *= updates[i, ..., j, ...] +-- +-- This operation outputs ref after the update is done. This +-- makes it easier to chain operations that need to use the reset value. +-- +-- Duplicate entries are handled correctly: if multiple indices +-- reference the same location, their contributions multiply. +-- +-- Requires `updates.shape = indices.shape + ref.shape[1:]`. +scatterMul :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) +scatterMul' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t) + +-- | Creates a new tensor by applying sparse updates to individual +-- +-- values or slices within a zero tensor of the given shape tensor +-- according to indices. This operator is the inverse of the +-- tf.gather_nd operator which extracts values or slices from a +-- given tensor. +-- +-- TODO(simister): Add a link to Variable.getitem documentation on +-- slice syntax. +-- +-- shape is a TensorShape with rank P and +-- indices is a Tensor of rank Q. +-- +-- indices must be integer tensor, containing indices into +-- shape. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < +-- K <= P`. +-- +-- The innermost dimension of indices (with length K) +-- corresponds to indices into elements (if `K = P`) or slices (if `K +-- < P`) along the Kth dimension of shape. +-- +-- updates is Tensor of rank `Q-1+P-K` with shape: +-- +-- ``` [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]]. ``` +-- +-- The simplest form of scatter is to insert individual elements in a +-- tensor by index. For example, say we want to insert 4 scattered +-- elements in a rank-1 tensor with 8 elements. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/ScatterNd1.png" alt /div +-- +-- In Python, this scatter operation would look like this: +-- +-- indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, +-- 10, 11, 12]) shape = tf.constant([8]) scatter = tf.scatter_nd(indices, +-- updates, shape) with tf.Session() as sess: print sess.run(scatter) +-- +-- The resulting tensor would look like this: +-- +--
+-- [0, 11, 0, 10, 9, 0, 0, 12]
+-- +-- We can also, insert entire slices of a higher rank tensor all at once. +-- For example, if we wanted to insert two slices in the first dimension +-- of a rank-3 tensor with two matrices of new values. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/ScatterNd2.png" alt /div +-- +-- In Python, this scatter operation would look like this: +-- +-- indices = tf.constant([[0], [2]]) updates = tf.constant([[[5, 5, 5, +-- 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[5, 5, 5, 5], [6, 6, +-- 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]) shape = tf.constant([4, 4, 4]) +-- scatter = tf.scatter_nd(indices, updates, shape) with tf.Session() as +-- sess: print sess.run(scatter) +-- +-- The resulting tensor would look like this: +-- +--
+-- [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+--  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+--  [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+--  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
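+--
+-- The first example above, sketched with this Haskell binding (an
+-- editorial illustration; it assumes the constant and vector helpers
+-- from TensorFlow.Ops and run/runSession from TensorFlow.Core):
+--
+-- ```
+-- import qualified Data.Vector as V
+-- import Data.Int (Int32)
+-- import qualified TensorFlow.Core as TF
+-- import qualified TensorFlow.GenOps.Core as TF (scatterNd)
+-- import qualified TensorFlow.Ops as TF (constant, vector)
+--
+-- main :: IO ()
+-- main = do
+--   result <- TF.runSession $ do
+--     -- Four scattered updates into a zero vector of length 8.
+--     let indices = TF.constant (TF.Shape [4, 1]) [4, 3, 1, 7 :: Int32]
+--         updates = TF.vector [9, 10, 11, 12 :: Float]
+--         shape   = TF.vector [8 :: Int32]
+--     TF.run (TF.scatterNd indices updates shape)
+--   print (result :: V.Vector Float)  -- [0,11,0,10,9,0,0,12]
+-- ```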
+scatterNd :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t +scatterNd' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor Build t + +-- | Applies sparse addition between updates and individual values +-- or slices +-- +-- within a given variable according to indices. +-- +-- ref is a Tensor with rank P and +-- indices is a Tensor of rank Q. +-- +-- indices must be integer tensor, containing indices into +-- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < +-- K <= P`. +-- +-- The innermost dimension of indices (with length K) +-- corresponds to indices into elements (if `K = P`) or slices (if `K +-- < P`) along the Kth dimension of ref. +-- +-- updates is Tensor of rank `Q-1+P-K` with shape: +-- +-- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` +-- +-- For example, say we want to add 4 scattered elements to a rank-1 +-- tensor to 8 elements. In Python, that addition would look like this: +-- +-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = +-- tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, +-- 12]) add = tf.scatter_nd_add(ref, indices, updates) with tf.Session() +-- as sess: print sess.run(add) +-- +-- The resulting update to ref would look like this: +-- +--
+-- [1, 13, 3, 14, 14, 6, 7, 20]
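+--
+-- The same update, sketched in Haskell (an editorial illustration;
+-- initializedVariable is assumed to come from TensorFlow.Ops):
+--
+-- ```
+-- import qualified Data.Vector as V
+-- import Data.Int (Int32)
+-- import qualified TensorFlow.Core as TF
+-- import qualified TensorFlow.GenOps.Core as TF (scatterNdAdd)
+-- import qualified TensorFlow.Ops as TF (constant, initializedVariable, vector)
+--
+-- main :: IO ()
+-- main = do
+--   result <- TF.runSession $ do
+--     ref <- TF.initializedVariable (TF.vector [1, 2, 3, 4, 5, 6, 7, 8 :: Float])
+--     let indices = TF.constant (TF.Shape [4, 1]) [4, 3, 1, 7 :: Int32]
+--         updates = TF.vector [9, 10, 11, 12 :: Float]
+--     -- Fetching the returned ref forces the sparse addition to run.
+--     updated <- TF.scatterNdAdd ref indices updates
+--     TF.run updated
+--   print (result :: V.Vector Float)  -- [1,13,3,14,14,6,7,20]
+-- ```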
+--
+-- See tf.scatter_nd for more details about how to make updates to
+-- slices.
+scatterNdAdd :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+scatterNdAdd' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+
+-- | Applies sparse subtraction between updates and individual
+-- values or slices
+--
+-- within a given variable according to indices.
+--
+-- ref is a Tensor with rank P and
+-- indices is a Tensor of rank Q.
+--
+-- indices must be an integer tensor containing indices into
+-- ref. It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 <
+-- K <= P`.
+--
+-- The innermost dimension of indices (with length K)
+-- corresponds to indices into elements (if `K = P`) or slices (if `K
+-- < P`) along the Kth dimension of ref.
+--
+-- updates is a Tensor of rank `Q-1+P-K` with shape:
+--
+-- ```
+-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+-- ```
+--
+-- For example, say we want to subtract 4 scattered elements from a
+-- rank-1 tensor with 8 elements. In Python, that subtraction would look
+-- like this:
+--
+-- ```prettyprint
+-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+-- indices = tf.constant([[4], [3], [1], [7]])
+-- updates = tf.constant([9, 10, 11, 12])
+-- sub = tf.scatter_nd_sub(ref, indices, updates)
+-- with tf.Session() as sess:
+--     print(sess.run(sub))
+-- ```
+--
+-- The resulting update to ref would look like this:
+--
+-- ```
+-- [1, -9, 3, -6, -4, 6, 7, -4]
+-- ```
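+--
+-- The arithmetic mirrors the addition case with the sign flipped:
+-- ref[1] = 2 - 11 = -9, ref[3] = 4 - 10 = -6, ref[4] = 5 - 9 = -4,
+-- and ref[7] = 8 - 12 = -4; every other entry is unchanged.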
+--
+-- See tf.scatter_nd for more details about how to make updates to
+-- slices.
+scatterNdSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+scatterNdSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+
+-- | Applies sparse updates to individual values or slices within
+-- a given
+--
+-- variable according to indices.
+--
+-- ref is a Tensor with rank P and
+-- indices is a Tensor of rank Q.
+--
+-- indices must be an integer tensor containing indices into
+-- ref. It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 <
+-- K <= P`.
+--
+-- The innermost dimension of indices (with length K)
+-- corresponds to indices into elements (if `K = P`) or slices (if `K
+-- < P`) along the Kth dimension of ref.
+--
+-- updates is a Tensor of rank `Q-1+P-K` with shape:
+--
+-- ```
+-- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+-- ```
+--
+-- For example, say we want to update 4 scattered elements in a rank-1
+-- tensor with 8 elements. In Python, that update would look like this:
+--
+-- ```prettyprint
+-- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+-- indices = tf.constant([[4], [3], [1], [7]])
+-- updates = tf.constant([9, 10, 11, 12])
+-- update = tf.scatter_nd_update(ref, indices, updates)
+-- with tf.Session() as sess:
+--     print(sess.run(update))
+-- ```
+--
+-- The resulting update to ref would look like this:
+--
+-- ```
+-- [1, 11, 3, 10, 9, 6, 7, 12]
+-- ```
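+--
+-- Unlike scatterNd, the stateful variants take a Tensor Ref and run in
+-- MonadBuild. A hypothetical Haskell sketch of the update above,
+-- assuming initializedVariable from TensorFlow.Ops and the same
+-- qualified imports as in the earlier sketch:
+--
+-- ```haskell
+-- main :: IO ()
+-- main = do
+--   result <- TF.runSession $ do
+--     -- A mutable ref holding [1..8], like the tf.Variable above.
+--     ref <- TFO.initializedVariable
+--              (TFO.constant (TF.Shape [8]) [1, 2, 3, 4, 5, 6, 7, 8 :: Float])
+--     let indices = TFO.constant (TF.Shape [4, 1]) [4, 3, 1, 7 :: Int32]
+--         updates = TFO.constant (TF.Shape [4]) [9, 10, 11, 12 :: Float]
+--     -- Fetching the returned ref forces the update to run.
+--     updated <- TFC.scatterNdUpdate ref indices updates
+--     TF.run updated
+--   -- Expected: [1, 11, 3, 10, 9, 6, 7, 12]
+--   print (result :: V.Vector Float)
+-- ```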
+--
+-- See tf.scatter_nd for more details about how to make updates to
+-- slices.
+scatterNdUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+scatterNdUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+
+-- | Subtracts sparse updates from a variable reference.
+--
+-- ```
+-- # Scalar indices
+-- ref[indices, ...] -= updates[...]
+--
+-- # Vector indices (for each i)
+-- ref[indices[i], ...] -= updates[i, ...]
+--
+-- # High rank indices (for each i, ..., j)
+-- ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
+-- ```
+--
+-- This operation outputs ref after the update is done. This
+-- makes it easier to chain operations that need to use the reset value.
+--
+-- Duplicate entries are handled correctly: if multiple indices
+-- reference the same location, their (negated) contributions add.
+--
+-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
+--
+-- (Figure: ../../images/ScatterSub.png)
+scatterSub :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+scatterSub' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+
+-- | Applies sparse updates to a variable reference.
+--
+-- This operation computes
+--
+-- ```
+-- # Scalar indices
+-- ref[indices, ...] = updates[...]
+--
+-- # Vector indices (for each i)
+-- ref[indices[i], ...] = updates[i, ...]
+--
+-- # High rank indices (for each i, ..., j)
+-- ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
+-- ```
+--
+-- This operation outputs ref after the update is done. This
+-- makes it easier to chain operations that need to use the reset value.
+--
+-- If values in ref are to be updated more than once, because
+-- there are duplicate entries in indices, the order in which
+-- the updates happen for each value is undefined.
+--
+-- Requires `updates.shape = indices.shape + ref.shape[1:]`.
+--
+-- (Figure: ../../images/ScatterUpdate.png)
+scatterUpdate :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+scatterUpdate' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 tindices -> Tensor v'3 t -> m' (Tensor Ref t)
+
+-- | Computes fingerprints of the input strings.
+sdcaFprint :: Tensor v'1 ByteString -> Tensor Build Int64
+sdcaFprint' :: OpParams -> Tensor v'1 ByteString -> Tensor Build Int64
+
+-- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA)
+-- optimizer for
+--
+-- linear models with L1 + L2 regularization. As the global optimization
+-- objective is strongly convex, the optimizer optimizes the dual
+-- objective at each step. The optimizer applies each update one example
+-- at a time. Examples are sampled uniformly, and the optimizer is
+-- learning-rate free and enjoys a linear convergence rate.
+-- +-- Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; +-- Zhang, Tong. 2012 arXiv1211.2717S: +-- http://arxiv.org/pdf/1211.2717v1.pdf +-- +-- Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w| +-- +-- Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin +-- Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, +-- Martin Takac http://arxiv.org/abs/1502.03508 +-- +-- Stochastic Dual Coordinate Ascent with Adaptive Probabilities Dominik +-- Csiba, Zheng Qu, Peter Richtarik +-- https://arxiv.org/abs/1502.08053 +sdcaOptimizer :: Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) +sdcaOptimizer' :: OpParams -> Float -> Float -> Int64 -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 Int64] -> [Tensor v'3 Float] -> [Tensor v'4 Float] -> Tensor v'5 Float -> Tensor v'6 Float -> [Tensor v'7 Int64] -> [Tensor v'8 Float] -> [Tensor v'9 Float] -> Tensor v'10 Float -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) + +-- | Applies L1 regularization shrink step on the parameters. +sdcaShrinkL1 :: (MonadBuild m') => Float -> Float -> [Tensor Ref Float] -> m' (ControlNode) +sdcaShrinkL1' :: (MonadBuild m') => OpParams -> Float -> Float -> [Tensor Ref Float] -> m' (ControlNode) + +-- | Computes the maximum along segments of a tensor. -- -- Read the section on Segmentation for an explanation of -- segments. -- --- Computes a tensor such that `(output[i] = sum_{j...} data[j...]` where --- the sum is over tuples `j...` such that `segment_ids[j...] == i`. --- Unlike SegmentSum, segment_ids need not be sorted --- and need not cover all values in the full range of valid values. --- --- If the sum is empty for a given segment ID i, `output[i] = --- 0`. --- --- num_segments should equal the number of distinct segment IDs. +-- Computes a tensor such that \(output_i = max_j(data_j)\) where +-- max is over j such that `segment_ids[j] == i`. -- -- style="width:70%; margin:auto; margin-bottom:10px; -- margin-top:20px;" style="width:100%" --- src="../../images/UnsortedSegmentSum.png" alt /div -unsortedSegmentSum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor v3 Int32 -> Tensor Value t - --- | Computes the minimum along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = min_j(data_j)\) where --- min is over j such that `segment_ids[j] == i`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentMin.png" alt /div -segmentMin :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t - --- | Computes the product along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = prod_j data_j\) where the --- product is over j such that `segment_ids[j] == i`. 
--- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentProd.png" alt /div -segmentProd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t +-- src="../../images/SegmentMax.png" alt /div +segmentMax :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t +segmentMax' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t -- | Computes the mean along segments of a tensor. -- @@ -598,7 +5150,36 @@ segmentProd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int3 -- style="width:70%; margin:auto; margin-bottom:10px; -- margin-top:20px;" style="width:100%" -- src="../../images/SegmentMean.png" alt /div -segmentMean :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t +segmentMean :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t +segmentMean' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t + +-- | Computes the minimum along segments of a tensor. +-- +-- Read the section on Segmentation for an explanation of +-- segments. +-- +-- Computes a tensor such that \(output_i = min_j(data_j)\) where +-- min is over j such that `segment_ids[j] == i`. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/SegmentMin.png" alt /div +segmentMin :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t +segmentMin' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t + +-- | Computes the product along segments of a tensor. +-- +-- Read the section on Segmentation for an explanation of +-- segments. +-- +-- Computes a tensor such that \(output_i = prod_j data_j\) where the +-- product is over j such that `segment_ids[j] == i`. +-- +-- style="width:70%; margin:auto; margin-bottom:10px; +-- margin-top:20px;" style="width:100%" +-- src="../../images/SegmentProd.png" alt /div +segmentProd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t +segmentProd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t -- | Computes the sum along segments of a tensor. 
-- @@ -611,307 +5192,143 @@ segmentMean :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, -- style="width:70%; margin:auto; margin-bottom:10px; -- margin-top:20px;" style="width:100%" -- src="../../images/SegmentSum.png" alt /div -segmentSum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t +segmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t +segmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor Build t --- | Returns the index with the smallest value across dimensions of a --- tensor. -argMin :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value Int64 +-- | Selects elements from t or e, depending on +-- condition. +-- +-- The t, and e tensors must all have the same shape, +-- and the output will also have that shape. +-- +-- The condition tensor must be a scalar if t and +-- e are scalars. If t and e are vectors or +-- higher rank, then condition must be either a scalar, a vector +-- with size matching the first dimension of t, or must have the +-- same shape as t. +-- +-- The condition tensor acts as a mask that chooses, based on +-- the value at each element, whether the corresponding element / row in +-- the output should be taken from t (if true) or e (if +-- false). +-- +-- If condition is a vector and t and e are +-- higher rank matrices, then it chooses which row (outer dimension) to +-- copy from t and e. If condition has the +-- same shape as t and e, then it chooses which element +-- to copy from t and e. +-- +-- For example: +-- +-- ```prettyprint # condition tensor is [[True, False] # [False, +-- True]] # t is [[1, 2], # [3, 4]] # e is [[5, 6], # +-- [7, 8]] select(condition, t, e) ==> [[1, 6], [7, 4]] +-- +-- # condition tensor is [True, False] # t is [[1, 2], +-- # [3, 4]] # e is [[5, 6], # [7, 8]] select(condition, t, e) +-- ==> [[1, 2], [7, 8]] +-- +-- ``` +select :: (TensorType t) => Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t +select' :: (TensorType t) => OpParams -> Tensor v'1 Bool -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build t --- | Computes the maximum of elements across dimensions of a tensor. +-- | Computes the Eigen Decomposition of a batch of square self-adjoint +-- matrices. -- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -max :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +-- The input is a tensor of shape `[..., M, M]` whose inner-most 2 +-- dimensions form square matrices, with the same constraints as the +-- single matrix SelfAdjointEig. 
+-- +-- The result is a [..., M+1, M] matrix with [..., 0,:] containing the +-- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. +selfAdjointEig :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor Build t +selfAdjointEig' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Computes the minimum of elements across dimensions of a tensor. +-- | Computes the eigen decomposition of one or more square self-adjoint +-- matrices. -- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -min :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +-- Computes the eigenvalues and (optionally) eigenvectors of each inner +-- matrix in input such that `input[..., :, :] = v[..., :, :] * +-- diag(e[..., :])`. +-- +-- ```prettyprint # a is a tensor. # e is a tensor of eigenvalues. # v is +-- a tensor of eigenvectors. e, v = self_adjoint_eig(a) e = +-- self_adjoint_eig(a, compute_v=False) ``` +selfAdjointEigV2 :: (OneOf '[Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t) +selfAdjointEigV2' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t) --- | Computes the product of elements across dimensions of a tensor. +-- | Serialize an N-minibatch SparseTensor into an `[N, +-- 3]` string Tensor. -- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -prod :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +-- The SparseTensor must have rank R greater than 1, +-- and the first dimension is treated as the minibatch dimension. +-- Elements of the SparseTensor must be sorted in increasing +-- order of this first dimension. The serialized SparseTensor +-- objects going into each row of serialized_sparse will have +-- rank `R-1`. +-- +-- The minibatch size N is extracted from `sparse_shape[0]`. +serializeManySparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString +serializeManySparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString --- | Computes the sum of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -sum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +-- | Serialize a SparseTensor into a string 3-vector (1-D +-- Tensor) object. 
+serializeSparse :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString +serializeSparse' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build ByteString --- | Multiply matrix "a" by matrix "b". +-- | Number of unique elements along last dimension of input set. -- --- The inputs must be two-dimensional matrices and the inner dimension of --- "a" must match the outer dimension of "b". This op is optimized for --- the case where at least one of "a" or "b" is sparse. The breakeven for --- using this versus a dense matrix multiply on one platform was 30% zero --- values in the sparse matrix. -sparseMatMul :: (TensorType ta, OneOf '[Word16, Float] ta, TensorType tb, OneOf '[Word16, Float] tb) => Tensor v1 ta -> Tensor v2 tb -> Tensor Value Float +-- Input set is a SparseTensor represented by +-- set_indices, set_values, and set_shape. The +-- last dimension contains values in a set, duplicates are allowed but +-- ignored. +-- +-- If validate_indices is True, this op validates the +-- order and range of set indices. +setSize :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32 +setSize' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build Int32 --- | Multiply the matrix "a" by the matrix "b". +-- | Returns the shape of a tensor. -- --- The inputs must be two-dimensional matrices and the inner dimension of --- "a" (after being transposed if transpose_a is true) must match the --- outer dimension of "b" (after being transposed if transposed_b is --- true). +-- This operation returns a 1-D integer tensor representing the shape of +-- input. -- ---
--- *Note*: The default kernel implementation for MatMul on GPUs uses
--- cublas.
-matMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +-- For example: +-- +-- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], +-- [4, 4, 4]]] shape(t) ==> [2, 2, 3] ``` +shape :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type +shape' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type --- | Returns the truth value of x AND y element-wise. +-- | Returns shape of tensors. -- ---
--- *NOTE*: LogicalAnd supports broadcasting. More about
--- broadcasting here
-logicalAnd :: Tensor v1 Bool -> Tensor v2 Bool -> Tensor Value Bool +-- This operation returns N 1-D integer tensors representing shape of +-- `input[i]s`. +shapeN :: (TensorType t, OneOf '[Int32, Int64] out_type) => [Tensor v'1 t] -> [Tensor Build out_type] +shapeN' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> [Tensor v'1 t] -> [Tensor Build out_type] --- | Returns the truth value of (x == y) element-wise. +-- | Generate a sharded filename. The filename is printf formatted as -- ---
--- *NOTE*: Equal supports broadcasting. More about
--- broadcasting here
-equal :: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool +-- %s-%05d-of-%05d, basename, shard, num_shards. +shardedFilename :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString +shardedFilename' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Int32 -> Tensor Build ByteString --- | Returns the truth value of (x >= y) element-wise. --- ---
--- *NOTE*: GreaterEqual supports broadcasting. More about
--- broadcasting here
-greaterEqual :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool +-- | Generate a glob pattern matching all sharded file names. +shardedFilespec :: Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString +shardedFilespec' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor Build ByteString --- | Returns the truth value of (x <= y) element-wise. +-- | Computes sigmoid of x element-wise. -- ---
--- *NOTE*: LessEqual supports broadcasting. More about
--- broadcasting here
-lessEqual :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool +-- Specifically, `y = 1 / (1 + exp(-x))`. +sigmoid :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +sigmoid' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Returns the truth value of (x < y) element-wise. +-- | Computes the gradient of the sigmoid of x wrt its input. -- ---
--- *NOTE*: Less supports broadcasting. More about
--- broadcasting here
-less :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool - --- | Compute the polygamma function \(psi^{(n)}(x)\). --- --- The polygamma function is defined as: --- --- ``` psi^{(n)}(x) = frac{d^n}{dx^n} psi(x) ``` where \(psi(x)\) is the --- digamma function. -polygamma :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Compute the lower regularized incomplete Gamma function `Q(a, x)`. --- --- The lower regularized incomplete Gamma function is defined as: --- --- ``` P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) ``` where ``` --- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt ``` is the lower --- incomplete Gamma function. --- --- Note, above `Q(a, x)` (Igammac) is the upper regularized --- complete Gamma function. -igamma :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Compute the upper regularized incomplete Gamma function `Q(a, x)`. --- --- The upper regularized incomplete Gamma function is defined as: --- --- ``` Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) ``` where ``` --- Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt ``` is the upper --- incomplete Gama function. --- --- Note, above `P(a, x)` (Igamma) is the lower regularized --- complete Gamma function. -igammac :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns element-wise remainder of division. --- ---
--- *NOTE*: Mod supports broadcasting. More about broadcasting
--- here
-mod :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns the min of x and y (i.e. x < y ? x : y) element-wise. --- ---
--- *NOTE*: Minimum supports broadcasting. More about
--- broadcasting here
-minimum :: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns the max of x and y (i.e. x > y ? x : y) element-wise. --- ---
--- *NOTE*: Maximum supports broadcasting. More about
--- broadcasting here
-maximum :: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns (x - y)(x - y) element-wise. --- ---
--- *NOTE*: SquaredDifference supports broadcasting. More
--- about broadcasting here
-squaredDifference :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes softplus gradients for a softplus operation. -softplusGrad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | BatchToSpace for 4-D tensors of type T. --- --- This is a legacy version of the more general BatchToSpaceND. --- --- Rearranges (permutes) data from batch into blocks of spatial data, --- followed by cropping. This is the reverse transformation of --- SpaceToBatch. More specifically, this op outputs a copy of the input --- tensor where values from the batch dimension are moved in --- spatial blocks to the height and width dimensions, --- followed by cropping along the height and width --- dimensions. -batchToSpace :: (TensorType t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Int64 -> Tensor v1 t -> Tensor v2 tidx -> Tensor Value t - --- | Returns x * y element-wise. --- ---
--- *NOTE*: Mul supports broadcasting. More about broadcasting
--- here
-mul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns element-wise integer closest to x. --- --- If the result is midway between two representable values, the even --- representable is chosen. For example: --- --- ``` rint(-1.5) ==> -2.0 rint(0.5000001) ==> 1.0 rint([-1.7, --- -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., --- 2.] ``` -rint :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Returns element-wise smallest integer in not less than x. -ceil :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Returns element-wise largest integer not greater than x. -floor :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Performs 3D max pooling on the input. -maxPool3D :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Returns which elements of x are Inf. --- --- compatibility(numpy) Equivalent to np.isinf end_compatibility -isInf :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value Bool - --- | Computes the gradients of depthwise convolution with respect to the --- input. -depthwiseConv2dNativeBackpropInput :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Returns which elements of x are NaN. --- --- compatibility(numpy) Equivalent to np.isnan end_compatibility -isNan :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value Bool - --- | Computes natural logarithm of (1 + x) element-wise. --- --- I.e., \(y = log_e (1 + x)\). -log1p :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes asin of x element-wise. -asin :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Finds values and indices of the k largest elements for the --- last dimension. --- --- If the input is a vector (rank-1), finds the k largest --- entries in the vector and outputs their values and indices as vectors. --- Thus `values[j]` is the j-th largest entry in input, --- and its index is `indices[j]`. --- --- For matrices (resp. higher rank input), computes the top k --- entries in each row (resp. vector along the last dimension). Thus, --- --- values.shape = indices.shape = input.shape[:-1] + [k] --- --- If two elements are equal, the lower-index element appears first. --- --- This is the same as TopK, but takes k as in input --- rather than an attr. -topKV2 :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> (Tensor Value t, Tensor Value Int32) - --- | Computes cos of x element-wise. -cos :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes sin of x element-wise. -sin :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Outputs random integers from a uniform distribution. --- --- The generated values are uniform integers in the range `[minval, --- maxval)`. The lower bound minval is included in the range, --- while the upper bound maxval is excluded. 
--- --- The random integers are slightly biased unless `maxval - minval` is an --- exact power of two. The bias is small for values of `maxval - minval` --- significantly smaller than the range of the output (either `2^32` or --- `2^64`). -randomUniformInt :: (TensorType tout, OneOf '[Int32, Int64] tout, TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Tensor v2 tout -> Tensor v3 tout -> Build (Tensor Value tout) - --- | Computes the complementary error function of x element-wise. -erfc :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes Psi, the derivative of Lgamma (the log of the absolute value --- of --- --- `Gamma(x)`), element-wise. -digamma :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Performs a resize and padding as a preprocess during a convolution. --- --- It's often possible to do spatial transformations more efficiently as --- part of the packing stage of a convolution, so this op allows for an --- optimized implementation where these stages are fused together. This --- prevents the need to write out the intermediate results as whole --- tensors, reducing memory pressure, and we can get some latency gains --- by merging the transformation calculations. The data_format attribute --- for Conv2D isn't supported by this op, and defaults to NHWC --- order. Internally this op uses a single per-graph scratch buffer, --- which means that it will block if multiple versions are being run in --- parallel. This is because this operator is primarily an optimization --- to minimize memory usage. -fusedResizeAndPadConv2D :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 Int32 -> Tensor v4 t -> Tensor Value t - --- | Returns x - y element-wise. --- ---
--- *NOTE*: Sub supports broadcasting. More about broadcasting
--- here
-sub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +-- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and +-- dy is the corresponding input gradient. +sigmoidGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +sigmoidGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Returns an element-wise indication of the sign of a number. -- @@ -919,272 +5336,97 @@ sub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word1 -- -- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y -- = 0`. -sign :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t +sign :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +sign' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Computes the log of the absolute value of `Gamma(x)` element-wise. -lgamma :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t +-- | Computes sin of x element-wise. +sin :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +sin' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Computes natural logarithm of x element-wise. +-- | Returns the size of a tensor. -- --- I.e., \(y = log_e x\). -log :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes exponential of x element-wise. \(y = e^x\). -exp :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes the grayscale dilation of 4-D input and 3-D --- filter tensors. --- --- The input tensor has shape `[batch, in_height, in_width, --- depth]` and the filter tensor has shape `[filter_height, --- filter_width, depth]`, i.e., each input channel is processed --- independently of the others with its own structuring function. The --- output tensor has shape `[batch, out_height, out_width, --- depth]`. The spatial dimensions of the output tensor depend on the --- padding algorithm. We currently only support the default --- NHWC data_format. --- --- In detail, the grayscale morphological 2-D dilation is the max-sum --- correlation (for consistency with conv2d, we use unmirrored --- filters): --- --- output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * --- dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] --- --- Max-pooling is a special case when the filter has size equal to the --- pooling kernel size and contains all zeros. --- --- Note on duality: The dilation of input by the filter --- is equal to the negation of the erosion of `-input` by the reflected --- filter. -dilation2D :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes the gradient for the rsqrt of x wrt its input. --- --- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and --- dy is the corresponding input gradient. 
-rsqrtGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes reciprocal of square root of x element-wise. --- --- I.e., \(y = 1 / sqrt{x}\). -rsqrt :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Produces the max pool of the input tensor for quantized types. -quantizedMaxPool :: (TensorType t, OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v1 t -> Tensor v2 Float -> Tensor v3 Float -> (Tensor Value t, Tensor Value Float, Tensor Value Float) - --- | Computes square root of x element-wise. --- --- I.e., \(y = sqrt{x} = x^{1/2}\). -sqrt :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | A Reader that outputs the queued work as both the key and value. --- --- To use, enqueue strings in a Queue. ReaderRead will take the front --- work string and output (work, work). -identityReader :: Build (Tensor Ref ByteString) - --- | Computes square of x element-wise. --- --- I.e., \(y = x * x = x^2\). -square :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Reshapes a quantized tensor as per the Reshape op. --- --- ``` -quantizedReshape :: (TensorType t, TensorType tshape, OneOf '[Int32, Int64] tshape) => Tensor v1 t -> Tensor v2 tshape -> Tensor v3 Float -> Tensor v4 Float -> (Tensor Value t, Tensor Value Float, Tensor Value Float) - --- | Computes the gradient for the inverse of x wrt its input. --- --- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is --- the corresponding input gradient. -reciprocalGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes the gradient for the inverse of x wrt its input. --- --- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and dy is --- the corresponding input gradient. -invGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes the reciprocal of x element-wise. --- --- I.e., \(y = 1 / x\). -inv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Concat the elements from the TensorArray into value value. --- --- Takes T elements of shapes --- --- ``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 --- x ...) ``` --- --- and concatenates them into a Tensor of shape: --- --- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` --- --- All elements must have the same shape (excepting the first dimension). -tensorArrayConcatV2 :: (TensorType dtype) => Tensor v1 ByteString -> Tensor v2 Float -> (Tensor Value dtype, Tensor Value Int64) - --- | Computes the complex absolute value of a tensor. --- --- Given a tensor x of complex numbers, this operation returns a --- tensor of type float or double that is the absolute --- value of each element in x. All elements in x must --- be complex numbers of the form \(a + bj\). The absolute value is --- computed as \( sqrt{a^2 + b^2}\). +-- This operation returns an integer representing the number of elements +-- in input. 
-- -- For example: -- --- ``` # tensor x is [[-2.25 + 4.75j], [-3.25 + 5.75j]] --- tf.complex_abs(x) ==> [5.25594902, 6.60492229] ``` -complexAbs :: (TensorType t, OneOf '[Complex Double, Complex Float] t, TensorType tout, OneOf '[Double, Float] tout) => Tensor v1 t -> Tensor Value tout +-- ```prettyprint # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], +-- [4, 4, 4]]]] size(t) ==> 12 ``` +size :: (TensorType t, OneOf '[Int32, Int64] out_type) => Tensor v'1 t -> Tensor Build out_type +size' :: (TensorType t, OneOf '[Int32, Int64] out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type --- | Cast x of type SrcT to y of DstT. +-- | Parses a text file and creates a batch of examples. +skipgram :: (MonadBuild m') => Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)) +skipgram' :: (MonadBuild m') => OpParams -> Int64 -> m' ((Tensor Value ByteString, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int64, Tensor Value Int32, Tensor Value Int32)) + +-- | Return a slice from input. -- --- _HostCast requires its input and produces its output in host memory. -_HostCast :: (TensorType srcT, TensorType dstT) => Tensor v1 srcT -> Tensor Value dstT - --- | Resize images to size using nearest neighbor --- interpolation. -resizeNearestNeighbor :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t - --- | Deprecated. Disallowed in GraphDef version >= 2. -adjustContrast :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Float -> Tensor v3 Float -> Tensor v4 Float -> Tensor Value Float -batchMatrixDiagPart :: (TensorType t) => Tensor v1 t -> Tensor Value t -batchMatrixSetDiag :: (TensorType t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t -batchMatrixDiag :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. -fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Float -> Tensor v4 Float -> (Tensor Value Float, Tensor Value Float, Tensor Value Float) - --- | Computes gradients for SparseSegmentSqrtN. --- --- Returns tensor "output" with same shape as grad, except for dimension --- 0 whose value is output_dim0. -sparseSegmentSqrtNGrad :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value t - --- | Fake-quantize the inputs tensor of type float and one of the --- shapes: `[d]`, --- --- `[b, d]` `[b, h, w, d]` via per-channel floats min and --- max of shape `[d]` to outputs tensor of same shape as --- inputs. +-- The output tensor is a tensor with dimensions described by size +-- whose values are extracted from input starting at the offsets +-- in begin. -- --
---
--- min; max is the clamping range for the inputs data
--- in the corresponding depth channel. Op divides this range into 255
--- steps (total of 256 values), then replaces each inputs value
--- with the closest of the quantized step values.
+--
+-- *Requirements*: 0 <= begin[i] <= begin[i] + size[i] <= Di
+-- for i in [0, n)
--- --- This operation has a gradient and thus allows for training min --- and max values. -fakeQuantWithMinMaxVarsPerChannel :: Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Float -> Tensor Value Float +slice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t +slice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor Build t --- | Outputs a Summary protocol buffer with scalar values. +-- | Computes softmax activations. -- --- The input tags and values must have the same shape. --- The generated summary has a summary value for each tag-value pair in --- tags and values. -scalarSummary :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor Value ByteString +-- For each batch i and class j we have +-- +-- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) +softmax :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +softmax' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Computes numerical negative value element-wise. +-- | Computes softmax cross entropy cost and gradients to backpropagate. -- --- I.e., \(y = -x\). -neg :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t +-- Inputs are the logits, not probabilities. +softmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) +softmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) --- | Compute gradients for a FakeQuantWithMinMaxArgs operation. -fakeQuantWithMinMaxArgsGradient :: Tensor v1 Float -> Tensor v2 Float -> Tensor Value Float +-- | Computes softplus: `log(exp(features) + 1)`. +softplus :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +softplus' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Debug NaN Value Counter Op --- --- Counts number of NaNs in the input tensor, for debugging. -debugNanCount :: (TensorType t) => Tensor v1 t -> Tensor Value Int64 +-- | Computes softplus gradients for a softplus operation. +softplusGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +softplusGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t --- | Debug Identity Op. --- --- Provides an identity mapping of the non-Ref type input tensor for --- debugging. -debugIdentity :: (TensorType t) => Tensor v1 t -> Tensor Value t +-- | Computes softsign: `features / (abs(features) + 1)`. +softsign :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor Build t +softsign' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Bitcasts a tensor from one type to another without copying data. --- --- Given a tensor input, this operation returns a tensor that --- has the same buffer data as input with datatype `type`. 
--- --- If the input datatype T is larger than the output datatype --- `type` then the shape changes from [...] to [..., --- sizeof(T)/sizeof(`type`)]. --- --- If T is smaller than `type`, the operator requires that the --- rightmost dimension be equal to sizeof(`type`)/sizeof(T). The --- shape then goes from [..., sizeof(`type`)/sizeof(T)] to --- [...]. --- ---
--- *NOTE*: Bitcast is implemented as a low-level cast, so machines
--- with different endian orderings will give different results.
-bitcast :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType type', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] type') => Tensor v1 t -> Tensor Value type' +-- | Computes softsign gradients for a softsign operation. +softsignGrad :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +softsignGrad' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t --- | Computes sigmoid of x element-wise. +-- | SpaceToBatch for 4-D tensors of type T. -- --- Specifically, `y = 1 / (1 + exp(-x))`. -sigmoid :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t +-- This is a legacy version of the more general SpaceToBatchND. +-- +-- Zero-pads and then rearranges (permutes) blocks of spatial data into +-- batch. More specifically, this op outputs a copy of the input tensor +-- where values from the height and width dimensions +-- are moved to the batch dimension. After the zero-padding, +-- both height and width of the input must be divisible +-- by the block size. +spaceToBatch :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t +spaceToBatch' :: (TensorType t, OneOf '[Int32, Int64] tpaddings) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tpaddings -> Tensor Build t --- | Copy Op. +-- | SpaceToBatch for N-D tensors of type T. -- --- Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on --- the device on which the tensor is allocated. --- --- Unlike the CopyHost Op, this op does not have HostMemory constraint on --- its input or output. -copy :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- A unigram sampler could use a fixed unigram distribution read from a --- file or passed in as an in-memory array instead of building up the --- distribution from data on the fly. There is also an option to skew the --- distribution by applying a distortion power to the weights. --- --- The vocabulary file should be in CSV-like format, with the last field --- being the weight associated with the word. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -fixedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) - --- | Computes the difference between two lists of numbers or strings. --- --- Given a list x and a list y, this operation returns --- a list out that represents all values that are in x --- but not in y. The returned list out is sorted in the --- same order that the numbers appear in x (duplicates are --- preserved). This operation also returns a list idx that --- represents the position of each out element in x. 
In --- other words: --- --- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` --- --- For example, given this input: --- --- ```prettyprint x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ``` --- --- This operation would return: --- --- ```prettyprint out ==> [2, 4, 6] idx ==> [1, 3, 5] ``` -listDiff :: (TensorType t, TensorType out_idx, OneOf '[Int32, Int64] out_idx) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value out_idx) - --- | Extract patches from images and put them in the --- "depth" output dimension. -extractImagePatches :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t +-- This operation divides "spatial" dimensions `[1, ..., M]` of the input +-- into a grid of blocks of shape block_shape, and interleaves +-- these blocks with the "batch" dimension (0) such that in the output, +-- the spatial dimensions `[1, ..., M]` correspond to the position within +-- the grid, and the batch dimension combines both the position within a +-- spatial block and the original batch position. Prior to division into +-- blocks, the spatial dimensions of the input are optionally zero padded +-- according to paddings. See below for a precise description. +spaceToBatchND :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t +spaceToBatchND' :: (TensorType t, OneOf '[Int32, Int64] tblock_shape, OneOf '[Int32, Int64] tpaddings) => OpParams -> Tensor v'1 t -> Tensor v'2 tblock_shape -> Tensor v'3 tpaddings -> Tensor Build t -- | SpaceToDepth for tensors of type T. -- @@ -1251,209 +5493,647 @@ extractImagePatches :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, -- -- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], -- [13, 14, 15, 16]]]] ``` -spaceToDepth :: (TensorType t) => Int64 -> Tensor v1 t -> Tensor Value t +spaceToDepth :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor Build t +spaceToDepth' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor Build t --- | Computes the gradient of the crop_and_resize op wrt the input boxes --- tensor. -cropAndResizeGradBoxes :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Float -> Tensor v2 t -> Tensor v3 Float -> Tensor v4 Int32 -> Tensor Value Float +-- | Applies a sparse gradient to a given accumulator. Does not add if +-- local_step is +-- +-- lesser than the accumulator's global_step. +sparseAccumulatorApplyGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode) +sparseAccumulatorApplyGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Bool -> Tensor Ref ByteString -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 dtype -> Tensor v'5 Int64 -> m' (ControlNode) --- | BatchToSpace for N-D tensors of type T. +-- | Extracts the average sparse gradient in the given +-- SparseConditionalAccumulator, -- --- This operation reshapes the "batch" dimension 0 into `M + 1` --- dimensions of shape `block_shape + [batch]`, interleaves these blocks --- back into the grid defined by the spatial dimensions `[1, ..., M]`, to --- obtain a result with the same rank as the input. 
The spatial --- dimensions of this intermediate result are then optionally cropped --- according to crops to produce the output. This is the reverse --- of SpaceToBatch. See below for a precise description. -batchToSpaceND :: (TensorType t, TensorType tblock_shape, OneOf '[Int32, Int64] tblock_shape, TensorType tcrops, OneOf '[Int32, Int64] tcrops) => Tensor v1 t -> Tensor v2 tblock_shape -> Tensor v3 tcrops -> Tensor Value t +-- provided that sufficient (i.e., more than num_required) gradients have +-- been accumulated. The op will blocks until sufficient gradients have +-- been accumulated. If the accumulator has already aggregated more than +-- num_required gradients, it will return its average of the accumulated +-- gradients. Also automatically increments the recorded global_step in +-- the accumulator by 1, and resets the aggregate to 0. +sparseAccumulatorTakeGradient :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) +sparseAccumulatorTakeGradient' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) --- | SpaceToBatch for 4-D tensors of type T. +-- | Adds two SparseTensor objects to produce another +-- SparseTensor. -- --- This is a legacy version of the more general SpaceToBatchND. +-- The input SparseTensor objects' indices are assumed ordered +-- in standard lexicographic order. If this is not the case, before this +-- step run SparseReorder to restore index ordering. -- --- Zero-pads and then rearranges (permutes) blocks of spatial data into --- batch. More specifically, this op outputs a copy of the input tensor --- where values from the height and width dimensions --- are moved to the batch dimension. After the zero-padding, --- both height and width of the input must be divisible --- by the block size. -spaceToBatch :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Int64 -> Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t +-- By default, if two values sum to zero at some index, the output +-- SparseTensor would still include that particular location in +-- its index, storing a zero in the corresponding value slot. To override +-- this, callers can specify thresh, indicating that if the sum +-- has a magnitude strictly smaller than thresh, its +-- corresponding value and index would then not be included. In +-- particular, `thresh == 0` (default) means everything is kept and +-- actual thresholding happens only for a positive value. +-- +-- In the following shapes, nnz is the count after taking +-- thresh into account. 
+sparseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) +sparseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> Tensor v'7 treal -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) --- | Adjust the hue of one or more images. +-- | The gradient operator for the SparseAdd op. -- --- images is a tensor of at least 3 dimensions. The last --- dimension is interpretted as channels, and must be three. --- --- The input image is considered in the RGB colorspace. Conceptually, the --- RGB colors are first mapped into HSV. A delta is then applied all the --- hue values, and then remapped back to RGB colorspace. -adjustHue :: Tensor v1 Float -> Tensor v2 Float -> Tensor Value Float +-- The SparseAdd op calculates A + B, where A, B, and the sum are all +-- represented as SparseTensor objects. This op takes in the +-- upstream gradient w.r.t. non-empty values of the sum, and outputs the +-- gradients w.r.t. the non-empty values of A and B. +sparseAddGrad :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t) +sparseAddGrad' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> (Tensor Build t, Tensor Build t) --- | SpaceToBatch for N-D tensors of type T. --- --- This operation divides "spatial" dimensions `[1, ..., M]` of the input --- into a grid of blocks of shape block_shape, and interleaves --- these blocks with the "batch" dimension (0) such that in the output, --- the spatial dimensions `[1, ..., M]` correspond to the position within --- the grid, and the batch dimension combines both the position within a --- spatial block and the original batch position. Prior to division into --- blocks, the spatial dimensions of the input are optionally zero padded --- according to paddings. See below for a precise description. -spaceToBatchND :: (TensorType t, TensorType tblock_shape, OneOf '[Int32, Int64] tblock_shape, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tblock_shape -> Tensor v3 tpaddings -> Tensor Value t +-- | var: Should be from a Variable(). 
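Editor's note: the sparseApply* ops that follow all share one pattern: only the rows named by an indices input are updated. A rough plain-Haskell sketch of that row-wise pattern, using the adagrad rule quoted further below; `Rows` and `sparseAdagradStep` are hypothetical names for illustration.

```haskell
import qualified Data.Map.Strict as Map

-- Variables modeled as maps from row index to a row of values.
type Rows = Map.Map Int [Double]

-- Row-wise adagrad step: rows absent from the sparse gradient are left
-- untouched; for present rows, accum += g*g and var -= lr*g/sqrt(accum).
-- Assumes every gradient row index exists in var and accum.
sparseAdagradStep :: Double -> Rows -> Rows -> Rows -> (Rows, Rows)
sparseAdagradStep lr var accum grad = Map.foldrWithKey step (var, accum) grad
  where
    step i g (v, a) =
      let aRow = zipWith (\ai gi -> ai + gi * gi) (a Map.! i) g
          vRow = zipWith3 (\vi gi ai -> vi - lr * gi / sqrt ai) (v Map.! i) g aRow
      in (Map.insert i vRow v, Map.insert i aRow a)

main :: IO ()
main = print (sparseAdagradStep 0.1
                (Map.fromList [(0, [1, 1]), (1, [1, 1])])   -- var
                (Map.fromList [(0, [0, 0]), (1, [0, 0])])   -- accum
                (Map.fromList [(1, [2, 1])]))               -- grad: row 1 only
-- row 0 is untouched; row 1 becomes [0.9,0.9] with accum [4.0,1.0]
```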
+sparseApplyAdadelta :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t) +sparseApplyAdadelta' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 tindices -> m' (Tensor Ref t) --- | Returns the diagonal part of the tensor. +-- | Update relevant entries in '*var' and '*accum' according to the +-- adagrad scheme. -- --- This operation returns a tensor with the diagonal part of the --- input. The diagonal part is computed as follows: +-- That is for rows we have grad for, we update var and accum as follows: +-- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) +sparseApplyAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t) +sparseApplyAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> m' (Tensor Ref t) + +-- | Update entries in '*var' and '*accum' according to the proximal +-- adagrad scheme. +sparseApplyAdagradDA :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t) +sparseApplyAdagradDA' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 Int64 -> m' (Tensor Ref t) + +-- | Update '*var' according to the centered RMSProp algorithm. -- --- Assume input has dimensions `[D1,..., Dk, D1,..., Dk]`, then --- the output is a tensor of rank k with dimensions `[D1,..., --- Dk]` where: +-- The centered RMSProp algorithm uses an estimate of the centered second +-- moment (i.e., the variance) for normalization, as opposed to regular +-- RMSProp, which uses the (uncentered) second moment. This often helps +-- with training, but is slightly more expensive in terms of computation +-- and memory. -- --- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. +-- Note that in dense implementation of this algorithm, mg, ms, and mom +-- will update even if the grad is zero, but in this sparse +-- implementation, mg, ms, and mom will not update in iterations during +-- which the grad is zero. 
+-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 +-- mean_grad = decay * mean_grad + (1-decay) * gradient Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** +-- 2) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +sparseApplyCenteredRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t) +sparseApplyCenteredRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> Tensor v'10 tindices -> m' (Tensor Ref t) + +-- | Update relevant entries in '*var' according to the Ftrl-proximal +-- scheme. +-- +-- That is for rows we have grad for, we update var, accum and linear as +-- follows: accum_new = accum + grad * grad linear += grad + +-- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 +-- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - +-- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new +sparseApplyFtrl :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) +sparseApplyFtrl' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 t -> m' (Tensor Ref t) + +-- | Update relevant entries in '*var' and '*accum' according to the +-- momentum scheme. +-- +-- Set use_nesterov = True if you want to use Nesterov momentum. +-- +-- That is for rows we have grad for, we update var and accum as follows: +-- +-- accum = accum * momentum + grad var -= lr * accum +sparseApplyMomentum :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t) +sparseApplyMomentum' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 tindices -> Tensor v'6 t -> m' (Tensor Ref t) + +-- | Sparse update entries in '*var' and '*accum' according to FOBOS +-- algorithm. 
+-- +-- That is for rows we have grad for, we update var and accum as follows: +-- accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / +-- sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} +sparseApplyProximalAdagrad :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t) +sparseApplyProximalAdagrad' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 tindices -> m' (Tensor Ref t) + +-- | Sparse update '*var' as FOBOS algorithm with fixed learning rate. +-- +-- That is for rows we have grad for, we update var as follows: prox_v = +-- var - alpha * grad var = sign(prox_v)/(1+alpha*l2) * +-- max{|prox_v|-alpha*l1,0} +sparseApplyProximalGradientDescent :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t) +sparseApplyProximalGradientDescent' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor v'2 t -> Tensor v'3 t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 tindices -> m' (Tensor Ref t) + +-- | Update '*var' according to the RMSProp algorithm. +-- +-- Note that in dense implementation of this algorithm, ms and mom will +-- update even if the grad is zero, but in this sparse implementation, ms +-- and mom will not update in iterations during which the grad is zero. +-- +-- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = +-- learning_rate * gradient / sqrt(mean_square + epsilon) +-- +-- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * +-- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom +sparseApplyRMSProp :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t) +sparseApplyRMSProp' :: (MonadBuild m', OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v'4 t -> Tensor v'5 t -> Tensor v'6 t -> Tensor v'7 t -> Tensor v'8 t -> Tensor v'9 tindices -> m' (Tensor Ref t) + +-- | Concatenates a list of SparseTensor along the specified +-- dimension. +-- +-- Concatenation is with respect to the dense versions of these sparse +-- tensors. It is assumed that each input is a SparseTensor +-- whose elements are ordered along increasing dimension number. +-- +-- All inputs' shapes must match, except for the concat dimension. The +-- indices, values, and shapes lists must have +-- the same length. 
+-- +-- The output shape is identical to the inputs', except along the concat +-- dimension, where it is the sum of the inputs' sizes along that +-- dimension. +-- +-- The output elements will be resorted to preserve the sort order along +-- increasing dimension number. +-- +-- This op runs in `O(M log M)` time, where M is the total +-- number of non-empty values across all inputs. This is due to the need +-- for an internal sort in order to concatenate efficiently across an +-- arbitrary dimension. +-- +-- For example, if `concat_dim = 1` and the inputs are +-- +-- sp_inputs[0]: shape = [2, 3] [0, 2]: "a" [1, 0]: "b" [1, 1]: "c" +-- +-- sp_inputs[1]: shape = [2, 4] [0, 1]: "d" [0, 2]: "e" +-- +-- then the output will be +-- +-- shape = [2, 7] [0, 2]: "a" [0, 4]: "d" [0, 5]: "e" [1, 0]: "b" [1, 1]: +-- "c" +-- +-- Graphically this is equivalent to doing +-- +--
+--     [    a] concat [  d e  ] = [    a   d e  ]
+--     [b c  ]        [       ]   [b c          ]
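Editor's note: a plain-Haskell sketch of the concat-with-resort behaviour described above; `concatSparse` is an illustrative name and the `Map` stands in for the (indices, values, shape) triple.

```haskell
import qualified Data.Map.Strict as Map

-- index vector -> value; Map order is already row-major lexicographic.
type Sparse a = Map.Map [Int] a

-- Concatenate along dimension d by offsetting that coordinate of the
-- second operand by the first operand's size along d; the Map then
-- holds the re-sorted result, mirroring the internal sort noted above.
concatSparse :: Int -> Int -> Sparse a -> Sparse a -> Sparse a
concatSparse d sizeD a b = Map.union a (Map.mapKeys offset b)
  where offset = zipWith (\k x -> if k == d then x + sizeD else x) [0 ..]

main :: IO ()
main = mapM_ print (Map.toList (concatSparse 1 3
         (Map.fromList [([0, 2], "a"), ([1, 0], "b"), ([1, 1], "c")])
         (Map.fromList [([0, 1], "d"), ([0, 2], "e")])))
-- [0,2] "a", [0,4] "d", [0,5] "e", [1,0] "b", [1,1] "c", as in the doc
```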
+sparseConcat :: (TensorType t) => Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) +sparseConcat' :: (TensorType t) => OpParams -> Int64 -> [Tensor v'1 Int64] -> [Tensor v'2 t] -> [Tensor v'3 Int64] -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) + +-- | A conditional accumulator for aggregating sparse gradients. The +-- accumulator +-- +-- accepts gradients marked with local_step greater or equal to the most +-- recent global_step known to the accumulator. The average can be +-- extracted from the accumulator, provided sufficient gradients have +-- been accumulated. Extracting the average automatically resets the +-- aggregate to 0, and increments the global_step recorded by the +-- accumulator. +sparseConditionalAccumulator :: (MonadBuild m') => DataType -> Shape -> m' (Tensor Ref ByteString) +sparseConditionalAccumulator' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (Tensor Ref ByteString) + +-- | Adds up a SparseTensor and a dense Tensor, using these special rules: +-- +--
+--   1. Broadcasts the dense side to have the same shape as the sparse
+--      side, if eligible;
+--   2. Then, only the dense values pointed to by the indices of the
+--      SparseTensor participate in the cwise addition.
+-- +-- By these rules, the result is a logical SparseTensor with exactly the +-- same indices and shape, but possibly with different non-zero values. +-- The output of this Op is the resultant non-zero values. +sparseDenseCwiseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t +sparseDenseCwiseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t + +-- | Component-wise divides a SparseTensor by a dense Tensor. +-- +--
+-- *Limitation*: this Op only broadcasts the dense side to the sparse
+-- side, but not the other direction.
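Editor's note: a small model of the behaviour above, assuming the dense side is only ever read at the sparse indices; `cwiseDivModel` is a hypothetical name.

```haskell
import qualified Data.Map.Strict as Map

type Sparse = Map.Map (Int, Int) Double

-- Only values at the sparse tensor's own indices are divided; dense
-- entries elsewhere never materialize in the output.
cwiseDivModel :: Sparse -> ((Int, Int) -> Double) -> Sparse
cwiseDivModel sp dense = Map.mapWithKey (\ix v -> v / dense ix) sp

main :: IO ()
main = print (cwiseDivModel (Map.fromList [((0, 0), 6), ((1, 2), 9)])
                            (\(i, j) -> fromIntegral (i + j + 2)))
-- fromList [((0,0),3.0),((1,2),1.8)]
```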
+sparseDenseCwiseDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t +sparseDenseCwiseDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t + +-- | Component-wise multiplies a SparseTensor by a dense Tensor. +-- +-- The output locations corresponding to the implicitly zero elements in +-- the sparse tensor will be zero (i.e., will not take up storage space), +-- regardless of the contents of the dense tensor (even if it's +/-INF +-- and that INF*0 == NaN). +-- +--
+-- *Limitation*: this Op only broadcasts the dense side to the sparse
+-- side, but not the other direction.
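Editor's note: a sketch of why implicit zeros take no storage here even against infinite dense values; `cwiseMulModel` is illustrative only.

```haskell
import qualified Data.Map.Strict as Map

type Sparse = Map.Map (Int, Int) Double

-- Multiplication happens only at stored indices, so an implicit zero
-- stays absent even where the dense side is +/-Inf (no Inf * 0 = NaN).
cwiseMulModel :: Sparse -> ((Int, Int) -> Double) -> Sparse
cwiseMulModel sp dense = Map.mapWithKey (\ix v -> v * dense ix) sp

main :: IO ()
main = print (cwiseMulModel (Map.fromList [((0, 0), 2)])
                            (\ix -> if ix == (0, 0) then 5 else 1 / 0))
-- fromList [((0,0),10.0)] -- the Inf cells never appear
```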
+sparseDenseCwiseMul :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t +sparseDenseCwiseMul' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t + +-- | Multiply matrix "a" by matrix "b". +-- +-- The inputs must be two-dimensional matrices and the inner dimension of +-- "a" must match the outer dimension of "b". This op is optimized for +-- the case where at least one of "a" or "b" is sparse. The breakeven for +-- using this versus a dense matrix multiply on one platform was 30% zero +-- values in the sparse matrix. +sparseMatMul :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float +sparseMatMul' :: (OneOf '[Word16, Float] ta, OneOf '[Word16, Float] tb) => OpParams -> Tensor v'1 ta -> Tensor v'2 tb -> Tensor Build Float + +-- | Computes the sum of elements across dimensions of a SparseTensor. +-- +-- This Op takes a SparseTensor and is the sparse counterpart to +-- `tf.reduce_sum()`. In particular, this Op also returns a dense +-- Tensor instead of a sparse one. +-- +-- Reduces sp_input along the dimensions given in +-- reduction_axes. Unless keep_dims is true, the rank +-- of the tensor is reduced by 1 for each entry in +-- reduction_axes. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +-- +-- If reduction_axes has no entries, all dimensions are reduced, +-- and a tensor with a single element is returned. Additionally, the axes +-- can be negative, which are interpreted according to the indexing rules +-- in Python. +sparseReduceSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t +sparseReduceSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> Tensor Build t + +-- | Computes the sum of elements across dimensions of a SparseTensor. +-- +-- This Op takes a SparseTensor and is the sparse counterpart to +-- `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a +-- SparseTensor. +-- +-- Reduces sp_input along the dimensions given in +-- reduction_axes. Unless keep_dims is true, the rank +-- of the tensor is reduced by 1 for each entry in +-- reduction_axes. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +-- +-- If reduction_axes has no entries, all dimensions are reduced, +-- and a tensor with a single element is returned. Additionally, the axes +-- can be negative, which are interpreted according to the indexing rules +-- in Python. 
+sparseReduceSumSparse :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) +sparseReduceSumSparse' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int32 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) + +-- | Reorders a SparseTensor into the canonical, row-major ordering. +-- +-- Note that by convention, all sparse ops preserve the canonical +-- ordering along increasing dimension number. The only time ordering can +-- be violated is during manual manipulation of the indices and values +-- vectors to add entries. +-- +-- Reordering does not affect the shape of the SparseTensor. +-- +-- If the tensor has rank R and N non-empty values, +-- input_indices has shape `[N, R]`, input_values has length +-- N, and input_shape has length R. +sparseReorder :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t) +sparseReorder' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build t) + +-- | Reshapes a SparseTensor to represent values in a new dense shape. +-- +-- This operation has the same semantics as reshape on the represented +-- dense tensor. The input_indices are recomputed based on the +-- requested new_shape. +-- +-- If one component of new_shape is the special value -1, the +-- size of that dimension is computed so that the total dense size +-- remains constant. At most one component of new_shape can be +-- -1. The number of dense elements implied by new_shape must be +-- the same as the number of dense elements originally implied by +-- input_shape. +-- +-- Reshaping does not affect the order of values in the SparseTensor. +-- +-- If the input tensor has rank R_in and N non-empty +-- values, and new_shape has length R_out, then +-- input_indices has shape `[N, R_in]`, input_shape has +-- length R_in, output_indices has shape `[N, R_out]`, +-- and output_shape has length R_out. +sparseReshape :: Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64) +sparseReshape' :: OpParams -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 Int64 -> (Tensor Build Int64, Tensor Build Int64) + +-- | Computes the mean along sparse segments of a tensor. +-- +-- Read the section on Segmentation for an explanation of +-- segments. +-- +-- Like SegmentMean, but segment_ids can have rank less +-- than `data`'s first dimension, selecting a subset of dimension 0, +-- specified by indices. +sparseSegmentMean :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t +sparseSegmentMean' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t + +-- | Computes gradients for SparseSegmentMean. +-- +-- Returns tensor "output" with same shape as grad, except for dimension +-- 0 whose value is output_dim0. 
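Editor's note: a plain-Haskell model of the forward SparseSegmentMean, placed here for reference next to its gradient; `sparseSegmentMeanModel` is a hypothetical name and assumes every segment id occurs at least once.

```haskell
-- Rows are first gathered via indices, then averaged per segment id.
sparseSegmentMeanModel :: [[Double]] -> [Int] -> [Int] -> [[Double]]
sparseSegmentMeanModel rows indices segIds =
  [ map (/ fromIntegral (length grp)) (foldr1 (zipWith (+)) grp)
  | seg <- [0 .. maximum segIds]
  , let grp = [rows !! i | (i, s) <- zip indices segIds, s == seg] ]

main :: IO ()
main = print (sparseSegmentMeanModel [[1, 2], [3, 4], [5, 6]] [0, 2] [0, 0])
-- [[3.0,4.0]] : the mean of rows 0 and 2, both in segment 0
```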
+sparseSegmentMeanGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t +sparseSegmentMeanGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t + +-- | Computes the sum along sparse segments of a tensor divided by the sqrt +-- of N. +-- +-- N is the size of the segment being reduced. +-- +-- Read the section on Segmentation for an explanation of +-- segments. +sparseSegmentSqrtN :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t +sparseSegmentSqrtN' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t + +-- | Computes gradients for SparseSegmentSqrtN. +-- +-- Returns tensor "output" with same shape as grad, except for dimension +-- 0 whose value is output_dim0. +sparseSegmentSqrtNGrad :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t +sparseSegmentSqrtNGrad' :: (OneOf '[Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor v'4 Int32 -> Tensor Build t + +-- | Computes the sum along sparse segments of a tensor. +-- +-- Read the section on Segmentation for an explanation of +-- segments. +-- +-- Like SegmentSum, but segment_ids can have rank less +-- than `data`'s first dimension, selecting a subset of dimension 0, +-- specified by indices. -- -- For example: -- --- ```prettyprint # input is [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, --- 3, 0] [0, 0, 0, 4]] +-- ```prettyprint c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) -- --- tf.diag_part(input) ==> [1, 2, 3, 4] ``` -diagPart :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | A placeholder op for a value that will be fed into the computation. +-- # Select two rows, one segment. tf.sparse_segment_sum(c, +-- tf.constant([0, 1]), tf.constant([0, 0])) ==> [[0 0 0 0]] -- --- N.B. This operation will fail with an error if it is executed. It is --- intended as a way to represent a value that will always be fed, and to --- provide attrs that enable the fed value to be checked at runtime. -placeholderV2 :: (TensorType dtype) => Shape -> Tensor Value dtype - --- | Computes acos of x element-wise. -acos :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | A placeholder op for a value that will be fed into the computation. +-- # Select two rows, two segment. tf.sparse_segment_sum(c, +-- tf.constant([0, 1]), tf.constant([0, 1])) ==> [[ 1 2 3 4] [-1 -2 -3 +-- -4]] -- --- N.B. This operation will fail with an error if it is executed. It is --- intended as a way to represent a value that will always be fed, and to --- provide attrs that enable the fed value to be checked at runtime. -placeholder :: (TensorType dtype) => Tensor Value dtype - --- | Does nothing. Serves as a control trigger for scheduling. +-- # Select all rows, two segments. tf.sparse_segment_sum(c, +-- tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) ==> [[0 0 0 0] [5 6 +-- 7 8]] -- --- Only useful as a placeholder for control edges. 
-controlTrigger :: ControlNode - --- | Computes atan of x element-wise. -atan :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Pads a tensor with mirrored values. --- --- This operation pads a input with mirrored values according to --- the paddings you specify. paddings is an integer --- tensor with shape `[n, 2]`, where n is the rank of input. For --- each dimension D of input, `paddings[D, 0]` indicates how --- many values to add before the contents of input in that --- dimension, and `paddings[D, 1]` indicates how many values to add after --- the contents of input in that dimension. Both `paddings[D, --- 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` --- (or `input.dim_size(D) - 1`) if copy_border is true (if --- false, respectively). --- --- The padded size of each dimension D of the output is: --- --- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` --- --- For example: --- --- ```prettyprint # t is [[1, 2, 3], [4, 5, 6]]. # --- paddings is [[1, 1]], [2, 2]]. # mode is SYMMETRIC. --- # rank of t is 2. pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, --- 2] [2, 1, 1, 2, 3, 3, 2] [5, 4, 4, 5, 6, 6, 5] [5, 4, 4, 5, 6, 6, 5]] +-- # Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) -- ``` -mirrorPad :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t +sparseSegmentSum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t +sparseSegmentSum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor v'3 Int32 -> Tensor Build t --- | Returns locations of true values in a boolean tensor. +-- | Applies softmax to a batched N-D SparseTensor. -- --- This operation returns the coordinates of true elements in --- input. The coordinates are returned in a 2-D tensor where the --- first dimension (rows) represents the number of true elements, and the --- second dimension (columns) represents the coordinates of the true --- elements. Keep in mind, the shape of the output tensor can vary --- depending on how many true values there are in input. Indices --- are output in row-major order. +-- The inputs represent an N-D SparseTensor with logical shape `[..., B, +-- C]` (where `N >= 2`), and with indices sorted in the canonical +-- lexicographic order. +-- +-- This op is equivalent to applying the normal `tf.nn.softmax()` to each +-- innermost logical submatrix with shape `[B, C]`, but with the catch +-- that *the implicitly zero elements do not participate*. Specifically, +-- the algorithm is equivalent to the following: +-- +--
+--   1. Applies `tf.nn.softmax()` to a densified view of each innermost
+--      submatrix with shape `[B, C]`, along the size-C dimension;
+--   2. Masks out the original implicitly-zero locations;
+--   3. Renormalizes the remaining elements.
+-- +-- Hence, the SparseTensor result has exactly the same non-zero +-- indices and shape. +sparseSoftmax :: (OneOf '[Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t +sparseSoftmax' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor Build t + +-- | Computes softmax cross entropy cost and gradients to backpropagate. +-- +-- Unlike SoftmaxCrossEntropyWithLogits, this operation does not +-- accept a matrix of label probabilities, but rather a single label per +-- row of features. This label is considered to have probability 1.0 for +-- the given row. +-- +-- Inputs are the logits, not probabilities. +sparseSoftmaxCrossEntropyWithLogits :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t) +sparseSoftmaxCrossEntropyWithLogits' :: (OneOf '[Word16, Double, Float] t, OneOf '[Int32, Int64] tlabels) => OpParams -> Tensor v'1 t -> Tensor v'2 tlabels -> (Tensor Build t, Tensor Build t) + +-- | Returns the element-wise max of two SparseTensors. +-- +-- Assumes the two SparseTensors have the same shape, i.e., no +-- broadcasting. +sparseSparseMaximum :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) +sparseSparseMaximum' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) + +-- | Returns the element-wise min of two SparseTensors. +-- +-- Assumes the two SparseTensors have the same shape, i.e., no +-- broadcasting. +sparseSparseMinimum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) +sparseSparseMinimum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t) + +-- | Split a SparseTensor into num_split tensors along +-- one dimension. +-- +-- If the `shape[split_dim]` is not an integer multiple of +-- num_split. Slices `[0 : shape[split_dim] % num_split]` gets +-- one extra dimension. For example, if `split_dim = 1` and `num_split = +-- 2` and the input is +-- +-- input_tensor = shape = [2, 7] [ a d e ] [b c ] +-- +-- Graphically the output tensors are: +-- +-- output_tensor[0] = shape = [2, 4] [ a ] [b c ] +-- +-- output_tensor[1] = shape = [2, 3] [ d e ] [ ] +sparseSplit :: (TensorType t) => Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) +sparseSplit' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int64 -> Tensor v'2 Int64 -> Tensor v'3 t -> Tensor v'4 Int64 -> ([Tensor Build Int64], [Tensor Build t], [Tensor Build Int64]) + +-- | Adds up a SparseTensor and a dense Tensor, producing a +-- dense Tensor. +-- +-- This Op does not require a_indices be sorted in standard +-- lexicographic order. 
+sparseTensorDenseAdd :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t +sparseTensorDenseAdd' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 t -> Tensor v'3 tindices -> Tensor v'4 t -> Tensor Build t + +-- | Multiply SparseTensor (of rank 2) A by dense matrix B. +-- +-- No validity checking is performed on the indices of A. However, the +-- following input format is recommended for optimal behavior: +-- +-- if adjoint_a == false: A should be sorted in lexicographically +-- increasing order. Use SparseReorder if you're not sure. if adjoint_a +-- == true: A should be sorted in order of increasing dimension 1 (i.e., +-- "column major" order instead of "row major" order). +sparseTensorDenseMatMul :: (TensorType t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t +sparseTensorDenseMatMul' :: (TensorType t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 t -> Tensor Build t + +-- | Converts a sparse representation into a dense tensor. +-- +-- Builds an array dense with shape output_shape such +-- that +-- +-- ```prettyprint # If sparse_indices is scalar dense[i] = (i == +-- sparse_indices ? sparse_values : default_value) +-- +-- # If sparse_indices is a vector, then for each i +-- dense[sparse_indices[i]] = sparse_values[i] +-- +-- # If sparse_indices is an n by d matrix, then for each i in [0, n) +-- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = +-- sparse_values[i] ``` +-- +-- All other values in dense are set to default_value. +-- If sparse_values is a scalar, all sparse indices are set to +-- this single value. +-- +-- Indices should be sorted in lexicographic order, and indices must not +-- contain any repeats. If validate_indices is true, these +-- properties are checked during execution. +sparseToDense :: (TensorType t, OneOf '[Int32, Int64] tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t +sparseToDense' :: (TensorType t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t + +-- | Applies set operation along last dimension of 2 SparseTensor +-- inputs. +-- +-- See SetOperationOp::SetOperationFromContext for values of +-- set_operation. +-- +-- If validate_indices is True, +-- SparseToSparseSetOperation validates the order and range of +-- set1 and set2 indices. +-- +-- Input set1 is a SparseTensor represented by +-- set1_indices, set1_values, and set1_shape. +-- For set1 ranked n, 1st `n-1` dimensions must be the +-- same as set2. Dimension n contains values in a set, +-- duplicates are allowed but ignored. +-- +-- Input set2 is a SparseTensor represented by +-- set2_indices, set2_values, and set2_shape. +-- For set2 ranked n, 1st `n-1` dimensions must be the +-- same as set1. Dimension n contains values in a set, +-- duplicates are allowed but ignored. +-- +-- If validate_indices is True, this op validates the +-- order and range of set1 and set2 indices. +-- +-- Output result is a SparseTensor represented by +-- result_indices, result_values, and +-- result_shape. 
For set1 and set2 ranked +-- n, this has rank n and the same 1st `n-1` dimensions +-- as set1 and set2. The nth dimension +-- contains the result of set_operation applied to the +-- corresponding `[0...n-1]` dimension of set. +sparseToSparseSetOperation :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) +sparseToSparseSetOperation' :: (OneOf '[ByteString, Int16, Int32, Int64, Int8, Word16, Word8] t) => OpParams -> Tensor v'1 Int64 -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Int64 -> Tensor v'5 t -> Tensor v'6 Int64 -> (Tensor Build Int64, Tensor Build t, Tensor Build Int64) -- | Splits a tensor into num_split tensors along one dimension. +split :: (TensorType t) => Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t] +split' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 Int32 -> Tensor v'2 t -> [Tensor Build t] -- | Splits a tensor into num_split tensors along one dimension. +splitV :: (TensorType t, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t] +splitV' :: (TensorType t, OneOf '[Int32, Int64] tlen) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 tlen -> Tensor v'3 Int32 -> [Tensor Build t] -- | Computes square root of x element-wise. -- -- I.e., \(y = \sqrt{x} = x^{1/2}\). +sqrt :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +sqrt' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes the gradient for the sqrt of x wrt its input. -- -- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and -- dy is the corresponding input gradient. +sqrtGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +sqrtGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes square of x element-wise. -- -- I.e., \(y = x * x = x^2\). +square :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +square' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns (x - y)(x - y) element-wise. --
+-- *NOTE*: SquaredDifference supports broadcasting. More about
+-- broadcasting here.
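Editor's note: the scalar-broadcast case as a one-liner in plain Haskell; `squaredDiff` is an illustrative name.

```haskell
-- Element-wise (x - y)^2; broadcasting a scalar y across x is the
-- simplest instance of the broadcasting mentioned in the NOTE above.
squaredDiff :: Num a => [a] -> a -> [a]
squaredDiff xs y = [ (x - y) * (x - y) | x <- xs ]

main :: IO ()
main = print (squaredDiff [1, 2, 4 :: Double] 3)
-- [4.0,1.0,1.0]
```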
+squaredDifference :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +squaredDifference' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Removes dimensions of size 1 from the shape of a tensor. +-- +-- Given a tensor input, this operation returns a tensor of the +-- same type with all dimensions of size 1 removed. If you don't want to +-- remove all size 1 dimensions, you can remove specific size 1 +-- dimensions by specifying squeeze_dims. -- -- For example: -- --- ```prettyprint # input tensor is [[True, False] # [True, --- False]] # input has two true values, so output has two --- coordinates. # input has rank of 2, so coordinates have two --- indices. where(input) ==> [[0, 0], [1, 0]] +-- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] +-- shape(squeeze(t)) ==> [2, 3] ``` -- --- # input tensor is [[[True, False] # [True, False]] # [[False, --- True] # [False, True]] # [[False, False] # [False, True]]] # --- input has 5 true values, so output has 5 coordinates. # --- input has rank of 3, so coordinates have three indices. --- where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, --- 1, 1]] ``` -where' :: Tensor v1 Bool -> Tensor Value Int64 +-- Or, to remove specific size 1 dimensions: +-- +-- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] +-- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] ``` +squeeze :: (TensorType t) => Tensor v'1 t -> Tensor Build t +squeeze' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t --- | Computes gradients of average pooling function. -avgPool3DGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t +-- | A stack that produces elements in first-in last-out order. +stack :: (MonadBuild m') => DataType -> m' (Tensor Ref ByteString) +stack' :: (MonadBuild m') => OpParams -> DataType -> m' (Tensor Ref ByteString) --- | Restore a Reader to its initial clean state. -readerReset :: Tensor Ref ByteString -> Build (ControlNode) +-- | Delete the stack from its resource container. +stackClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) +stackClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) --- | Returns the gradient of Tile. --- --- Since Tile takes an input and repeats the input --- multiples times along each dimension, TileGrad takes --- in multiples and aggregates each repeated tile of --- input into output. -tileGrad :: (TensorType t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t +-- | Pop the element at the top of the stack. +stackPop :: (MonadBuild m', TensorType elem_type) => Tensor Ref ByteString -> m' (Tensor Value elem_type) +stackPop' :: (MonadBuild m', TensorType elem_type) => OpParams -> Tensor Ref ByteString -> m' (Tensor Value elem_type) --- | Inserts a dimension of 1 into a tensor's shape. --- --- Given a tensor input, this operation inserts a dimension of 1 --- at the dimension index dim of input's shape. The --- dimension index dim starts at zero; if you specify a negative --- number for dim it is counted backward from the end. --- --- This operation is useful if you want to add a batch dimension to a --- single element. 
For example, if you have a single image of shape --- `[height, width, channels]`, you can make it a batch of 1 image with --- `expand_dims(image, 0)`, which will make the shape `[1, height, width, --- channels]`. --- --- Other examples: --- --- ```prettyprint # t is a tensor of shape [2] --- shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> --- [2, 1] shape(expand_dims(t, -1)) ==> [2, 1] --- --- # t2 is a tensor of shape [2, 3, 5] shape(expand_dims(t2, 0)) --- ==> [1, 2, 3, 5] shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] --- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] ``` --- --- This operation requires that: --- --- `-1-input.dims() <= dim <= input.dims()` --- --- This operation is related to `squeeze()`, which removes dimensions of --- size 1. -expandDims :: (TensorType t, TensorType tdim, OneOf '[Int32, Int64] tdim) => Tensor v1 t -> Tensor v2 tdim -> Tensor Value t +-- | Push an element onto the stack. +stackPush :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t) +stackPush' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> m' (Tensor Value t) --- | Outputs a Summary protocol buffer with a tensor. -tensorSummary :: (TensorType t) => Tensor v1 t -> Tensor Value ByteString - --- | Constructs a tensor by tiling a given tensor. +-- | Stage values similar to a lightweight Enqueue. The basic functionality +-- of this -- --- This operation creates a new tensor by replicating input --- multiples times. The output tensor's i'th dimension has --- `input.dims(i) * multiples[i]` elements, and the values of --- input are replicated `multiples[i]` times along the --- ith dimension. For example, tiling `[a b c d]` by `[2]` --- produces `[a b c d a b c d]`. -tile :: (TensorType t, TensorType tmultiples, OneOf '[Int32, Int64] tmultiples) => Tensor v1 t -> Tensor v2 tmultiples -> Tensor Value t +-- Op is similar to a queue with many fewer capabilities and options. +-- This Op is optimized for performance. +stage :: (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -> m' (ControlNode) +stage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -> m' (ControlNode) + +-- | Stops gradient computation. +-- +-- When executed in a graph, this op outputs its input tensor as-is. +-- +-- When building ops to compute gradients, this op prevents the +-- contribution of its inputs to be taken into account. Normally, the +-- gradient generator adds ops to a graph to compute the derivatives of a +-- specified loss by recursively finding out inputs that +-- contributed to its computation. If you insert this op in the graph it +-- inputs are masked from the gradient generator. They are not taken into +-- account for computing gradients. +-- +-- This is useful any time you want to compute a value with TensorFlow +-- but need to pretend that the value was a constant. Some examples +-- include: +-- +--
+--   * The *EM* algorithm where the *M-step* should not involve
+--     backpropagation through the output of the *E-step*.
+--   * Contrastive divergence training of Boltzmann machines where, when
+--     differentiating the energy function, the training must not
+--     backpropagate through the graph that generated the samples from
+--     the model.
+--   * Adversarial training, where no backprop should happen through the
+--     adversarial example generation process.
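Editor's note: the identity-forward / zero-gradient contract above, modelled with a toy forward-mode AD pair; `Dual` and `stopGradientModel` are hypothetical names, not part of this package.

```haskell
-- A forward-mode AD pair: a value carried together with its derivative.
data Dual = Dual { value :: Double, deriv :: Double } deriving Show

-- Identity on the value, but the derivative is cut to zero, which is
-- the contract described above for stopGradient.
stopGradientModel :: Dual -> Dual
stopGradientModel (Dual v _) = Dual v 0

main :: IO ()
main = do
  let x = Dual 3 1                                         -- dx/dx = 1
      y = Dual (value x * value x) (2 * value x * deriv x) -- y = x^2
  print (stopGradientModel y)  -- Dual {value = 9.0, deriv = 0.0}
```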
+stopGradient :: (TensorType t) => Tensor v'1 t -> Tensor Build t +stopGradient' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t -- | Return a strided slice from input. -- @@ -1545,34 +6225,40 @@ tile :: (TensorType t, TensorType tmultiples, OneOf '[Int32, Int64] tmultiples) --
-- *Requirements*: `0 != strides[i] for i in [0, m)` `ellipsis_mask must be a power of two (only one ellipsis)`
-stridedSlice :: (TensorType t, TensorType index, OneOf '[Int32, Int64] index) => Tensor v1 t -> Tensor v2 index -> Tensor v3 index -> Tensor v4 index -> Tensor Value t +stridedSlice :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t +stridedSlice' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor Build t --- | Return a slice from input. +-- | Assign value to the sliced l-value reference of ref. -- --- The output tensor is a tensor with dimensions described by size --- whose values are extracted from input starting at the offsets --- in begin. +-- The values of value are assigned to the positions in the +-- variable ref that are selected by the slice parameters. The +-- slice parameters `begin`, `end`, `strides`, etc. work +-- exactly as in StridedSlice. --
--- *Requirements*: 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
    -slice :: (TensorType t, TensorType index, OneOf '[Int32, Int64] index) => Tensor v1 t -> Tensor v2 index -> Tensor v3 index -> Tensor Value t +-- NOTE this op currently does not support broadcasting and so +-- value's shape must be exactly the shape produced by the slice +-- of ref. +stridedSliceAssign :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t) +stridedSliceAssign' :: (MonadBuild m', TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor Ref t -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> m' (Tensor Ref t) --- | Computes a 2D convolution given quantized 4D input and filter tensors. +-- | Returns the gradient of StridedSlice. -- --- The inputs are quantized tensors where the lowest value represents the --- real number of the associated minimum, and the highest represents the --- maximum. This means that you can only interpret the quantized output --- in the same way, by taking the returned minimum and maximum values --- into account. -quantizedConv2D :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, TensorType tfilter, OneOf '[Int16, Int32, Word16, Word8] tfilter, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v1 tinput -> Tensor v2 tfilter -> Tensor v3 Float -> Tensor v4 Float -> Tensor v5 Float -> Tensor v6 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float) +-- Since StridedSlice cuts out pieces of its input +-- which is size shape, its gradient will have the same shape +-- (which is passed here as shape). The gradient will be zero in +-- any element that the slice does not select. +-- +-- Arguments are the same as StridedSliceGrad with the exception that +-- dy is the input gradient to be propagated and shape is +-- the shape of StridedSlice's input. +stridedSliceGrad :: (TensorType t, OneOf '[Int32, Int64] index) => Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t +stridedSliceGrad' :: (TensorType t, OneOf '[Int32, Int64] index) => OpParams -> Tensor v'1 index -> Tensor v'2 index -> Tensor v'3 index -> Tensor v'4 index -> Tensor v'5 t -> Tensor Build t --- | Computes rectified linear 6 gradients for a Relu6 operation. -relu6Grad :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes gradients of the average pooling function. -avgPoolGrad :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t +-- | Joins the strings in the given list of string tensors into one tensor; +-- +-- with the given separator (default is an empty separator). +stringJoin :: [Tensor v'1 ByteString] -> Tensor Build ByteString +stringJoin' :: OpParams -> [Tensor v'1 ByteString] -> Tensor Build ByteString -- | Split elements of input based on delimiter into a -- SparseTensor. @@ -1582,800 +6268,19 @@ avgPoolGrad :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Int -- return a SparseTensor containing the splitted tokens. Empty -- tokens are ignored. -- --- delimiter can be empty or a single-byte character. If +-- delimiter can be empty, or a string of split characters. If -- delimiter is an empty string, each element of input -- is split into individual single-byte character strings, including --- splitting of UTF-8 multibyte sequences. 
+-- splitting of UTF-8 multibyte sequences. Otherwise every character of +-- delimiter is a potential split point. -- -- For example: N = 2, input[0] is 'hello world' and input[1] is 'a b c', -- then the output will be -- -- indices = [0, 0; 0, 1; 1, 0; 1, 1; 1, 2] shape = [2, 3] values = -- [hello, world, a, b, c] -stringSplit :: Tensor v1 ByteString -> Tensor v2 ByteString -> (Tensor Value Int64, Tensor Value ByteString, Tensor Value Int64) - --- | Returns the rank of a tensor. --- --- This operation returns an integer representing the rank of --- input. --- --- For example: --- --- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], --- [4, 4, 4]]] # shape of tensor t is [2, 2, 3] rank(t) ==> 3 --- ``` --- ---
--- *Note*: The rank of a tensor is not the same as the rank of a --- matrix. The rank of a tensor is the number of indices required to --- uniquely select each element of the tensor. Rank is also known as --- "order", "degree", or "ndims."
    -rank :: (TensorType t) => Tensor v1 t -> Tensor Value Int32 - --- | Computes the reciprocal of x element-wise. --- --- I.e., \(y = 1 / x\). -reciprocal :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Reverses variable length slices. --- --- This op first slices input along the dimension --- batch_dim, and for each slice i, reverses the first --- `seq_lengths[i]` elements along the dimension seq_dim. --- --- The elements of seq_lengths must obey `seq_lengths[i] < --- input.dims[seq_dim]`, and seq_lengths must be a vector of --- length `input.dims[batch_dim]`. --- --- The output slice i along dimension batch_dim is then --- given by input slice i, with the first `seq_lengths[i]` --- slices along dimension seq_dim reversed. --- --- For example: --- --- ```prettyprint # Given this: batch_dim = 0 seq_dim = 1 input.dims = --- (4, 8, ...) seq_lengths = [7, 2, 3, 5] --- --- # then slices of input are reversed on seq_dim, but only up to --- seq_lengths: output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] --- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] output[2, 0:3, :, --- ...] = input[2, 3:0:-1, :, ...] output[3, 0:5, :, ...] = input[3, --- 5:0:-1, :, ...] --- --- # while entries past seq_lens are copied through: output[0, 7:, :, --- ...] = input[0, 7:, :, ...] output[1, 2:, :, ...] = input[1, 2:, :, --- ...] output[2, 3:, :, ...] = input[2, 3:, :, ...] output[3, 2:, :, --- ...] = input[3, 2:, :, ...] ``` --- --- In contrast, if: --- --- ```prettyprint # Given this: batch_dim = 2 seq_dim = 0 input.dims = --- (8, ?, 4, ...) seq_lengths = [7, 2, 3, 5] --- --- # then slices of input are reversed on seq_dim, but only up to --- seq_lengths: output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] --- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] output[0:3, :, --- 2, :, ...] = input[3:0:-1, :, 2, :, ...] output[0:5, :, 3, :, ...] = --- input[5:0:-1, :, 3, :, ...] --- --- # while entries past seq_lens are copied through: output[7:, :, 0, :, --- ...] = input[7:, :, 0, :, ...] output[2:, :, 1, :, ...] = input[2:, :, --- 1, :, ...] output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] --- output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] ``` -reverseSequence :: (TensorType t, TensorType tlen, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v1 t -> Tensor v2 tlen -> Tensor Value t - --- | The backward operation for BiasAdd on the "bias" tensor. --- --- It accumulates all the values from out_backprop into the feature --- dimension. For NHWC data format, the feature dimension is the last. --- For NCHW data format, the feature dimension is the third-to-last. -biasAddGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Add a SparseTensor to a SparseTensorsMap return its --- handle. --- --- A SparseTensor is represented by three tensors: --- sparse_indices, sparse_values, and --- sparse_shape. --- --- This operator takes the given SparseTensor and adds it to a --- container object (a SparseTensorsMap). A unique key within --- this container is generated in the form of an int64, and this --- is the value that is returned. --- --- The SparseTensor can then be read out as part of a minibatch --- by passing the key as a vector element to --- TakeManySparseFromTensorsMap. To ensure the correct --- SparseTensorsMap is accessed, ensure that the same --- container and shared_name are passed to that Op. 
If --- no shared_name is provided here, instead use the *name* of --- the Operation created by calling AddSparseToTensorsMap as the --- shared_name passed to TakeManySparseFromTensorsMap. --- Ensure the Operations are colocated. -addSparseToTensorsMap :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Build (Tensor Value Int64) - --- | Computes tan of x element-wise. -tan :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes the sum of elements across dimensions of a SparseTensor. --- --- This Op takes a SparseTensor and is the sparse counterpart to --- `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a --- SparseTensor. --- --- Reduces sp_input along the dimensions given in --- reduction_axes. Unless keep_dims is true, the rank --- of the tensor is reduced by 1 for each entry in --- reduction_axes. If keep_dims is true, the reduced --- dimensions are retained with length 1. --- --- If reduction_axes has no entries, all dimensions are reduced, --- and a tensor with a single element is returned. Additionally, the axes --- can be negative, which are interpreted according to the indexing rules --- in Python. -sparseReduceSumSparse :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int32 -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64) - --- | Returns shape of tensors. --- --- This operation returns N 1-D integer tensors representing shape of --- `input[i]s`. -shapeN :: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) => [Tensor v1 t] -> [Tensor Value out_type] - --- | Returns the shape of a tensor. --- --- This operation returns a 1-D integer tensor representing the shape of --- input. --- --- For example: --- --- ```prettyprint # t is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], --- [4, 4, 4]]] shape(t) ==> [2, 2, 3] ``` -shape :: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) => Tensor v1 t -> Tensor Value out_type - --- | Finds unique elements in a 1-D tensor. --- --- This operation returns a tensor y containing all of the --- unique elements of x sorted in the same order that they occur --- in x. This operation also returns a tensor idx the --- same size as x that contains the index of each value of --- x in the unique output y. In other words: --- --- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` --- --- For example: --- --- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, --- idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, --- 3, 4, 4] ``` -unique :: (TensorType t, TensorType out_idx, OneOf '[Int32, Int64] out_idx) => Tensor v1 t -> (Tensor Value t, Tensor Value out_idx) - --- | Outputs random values from a truncated normal distribution. --- --- The generated values follow a normal distribution with mean 0 and --- standard deviation 1, except that values whose magnitude is more than --- 2 standard deviations from the mean are dropped and re-picked. -truncatedNormal :: (TensorType dtype, OneOf '[Word16, Double, Float] dtype, TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Build (Tensor Value dtype) - --- | Computes the inverse permutation of a tensor. --- --- This operation computes the inverse of an index permutation. 
It takes --- a 1-D integer tensor x, which represents the indices of a --- zero-based array, and swaps each value with its index position. In --- other words, for an output tensor y and an input tensor --- x, this operation computes the following: --- --- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` --- --- The values must include 0. There can be no duplicate values or --- negative values. --- --- For example: --- --- ```prettyprint # tensor x is [3, 4, 0, 2, 1] --- invert_permutation(x) ==> [2, 4, 3, 0, 1] ``` -invertPermutation :: (TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Tensor Value t - --- | Checks a tensor for NaN and Inf values. --- --- When run, reports an InvalidArgument error if tensor --- has any values that are not a number (NaN) or infinity (Inf). --- Otherwise, passes tensor as-is. -checkNumerics :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Generates labels for candidate sampling with a uniform distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -uniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) - --- | Gather slices from params according to indices. --- --- indices must be an integer tensor of any dimension (usually --- 0-D or 1-D). Produces an output tensor with shape `indices.shape + --- params.shape[1:]` where: --- --- ```python # Scalar indices output[:, ..., :] = params[indices, :, ... --- :] --- --- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] --- --- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, --- ..., j], :, ..., :] ``` --- --- If indices is a permutation and `len(indices) == --- params.shape[0]` then this operation will permute params --- accordingly. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/Gather.png" alt /div -gather :: (TensorType tparams, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tparams -> Tensor v2 tindices -> Tensor Value tparams - --- | Returns a constant tensor. -const :: (TensorType dtype) => Tensor Value dtype - --- | Creates a tensor filled with a scalar value. --- --- This operation creates a tensor of shape dims and fills it --- with value. --- --- For example: --- --- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) --- ==> [[9, 9, 9] [9, 9, 9]] ``` -fill :: (TensorType t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t - --- | Computes the (possibly normalized) Levenshtein Edit Distance. --- --- The inputs are variable-length sequences provided by SparseTensors --- (hypothesis_indices, hypothesis_values, hypothesis_shape) and --- (truth_indices, truth_values, truth_shape). --- --- The inputs are: -editDistance :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> Tensor Value Float - --- | Reverses specific dimensions of a tensor. 
--- --- Given a tensor, and a bool tensor dims --- representing the dimensions of tensor, this operation --- reverses each dimension i of tensor where `dims[i]` is --- True. --- --- tensor can have up to 8 dimensions. The number of dimensions --- of tensor must equal the number of elements in dims. --- In other words: --- --- `rank(tensor) = size(dims)` --- --- For example: --- --- ```prettyprint # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, --- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], # --- [20, 21, 22, 23]]]] # tensor t shape is [1, 2, 3, 4] --- --- # dims is [False, False, False, True] reverse(t, dims) ==> --- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12], --- [19, 18, 17, 16], [23, 22, 21, 20]]]] --- --- # dims is [False, True, False, False] reverse(t, dims) ==> --- [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, 1, 2, --- 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]] --- --- # dims is [False, False, True, False] reverse(t, dims) ==> --- [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, 23], [16, --- 17, 18, 19], [12, 13, 14, 15]]]] ``` -reverse :: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Bool -> Tensor Value t - --- | Returns a batched matrix tensor with new batched diagonal values. --- --- Given input and diagonal, this operation returns a --- tensor with the same shape and values as input, except for --- the main diagonal of the innermost matrices. These will be overwritten --- by the values in diagonal. --- --- The output is computed as follows: --- --- Assume input has `k+1` dimensions `[I, J, K, ..., M, N]` and --- diagonal has k dimensions `[I, J, K, ..., min(M, --- N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I, --- J, K, ..., M, N]` where: --- ---
--- • `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
--- • `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n` (see the sketch below).
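As noted above, the diagonal rule is simple enough to restate directly. A pure rendering for the single-matrix (rank-2) case; the name is mine and this is an illustration, not the op's implementation.

```haskell
-- Set the main diagonal of one matrix per the rule above:
-- output[m][n] = diagonal[n] when m == n, else input[m][n].
matrixSetDiagModel :: [[a]] -> [a] -> [[a]]
matrixSetDiagModel input diagonal =
    [ [ if m == n then diagonal !! n else v
      | (n, v) <- zip [0..] row ]
    | (m, row) <- zip [0..] input ]

-- matrixSetDiagModel [[1,2],[3,4]] [9,8] == [[9,2],[3,8]]
```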
    -matrixSetDiag :: (TensorType t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns a batched diagonal tensor with a given batched diagonal --- values. --- --- Given a diagonal, this operation returns a tensor with the --- diagonal and everything else padded with zeros. The diagonal --- is computed as follows: --- --- Assume diagonal has k dimensions `[I, J, K, ..., --- N]`, then the output is a tensor of rank `k+1` with dimensions [I, J, --- K, ..., N, N]` where: --- --- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. --- --- For example: --- --- ```prettyprint # diagonal is [[1, 2, 3, 4], [5, 6, 7, 8]] --- --- and diagonal.shape = (2, 4) --- --- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, --- 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, --- 8]]] --- --- which has shape (2, 4, 4) ``` -matrixDiag :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Returns a diagonal tensor with a given diagonal values. --- --- Given a diagonal, this operation returns a tensor with the --- diagonal and everything else padded with zeros. The diagonal --- is computed as follows: --- --- Assume diagonal has dimensions [D1,..., Dk], then the output --- is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: --- --- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 --- everywhere else. --- --- For example: --- --- ```prettyprint # diagonal is [1, 2, 3, 4] tf.diag(diagonal) --- ==> [[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]] ``` -diag :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Returns immutable tensor from memory region. --- --- The current implementation memmaps the tensor from a file. -immutableConst :: (TensorType dtype) => Shape -> Tensor Value dtype - --- | Concatenates tensors along one dimension. -concat :: (TensorType t) => Tensor v1 Int32 -> [Tensor v2 t] -> Tensor Value t - --- | Unpacks a given dimension of a rank-R tensor into --- num rank-`(R-1)` tensors. --- --- Unpacks num tensors from value by chipping it along --- the axis dimension. For example, given a tensor of shape `(A, --- B, C, D)`; --- --- If `axis == 0` then the i'th tensor in output is the slice --- `value[i, :, :, :]` and each tensor in output will have shape --- `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike --- split). --- --- If `axis == 1` then the i'th tensor in output is the slice --- `value[:, i, :, :]` and each tensor in output will have shape --- `(A, C, D)`. Etc. --- --- This is the opposite of pack. -unpack :: (TensorType t) => Int64 -> Tensor v1 t -> [Tensor Value t] - --- | Output a fact about factorials. -fact :: Tensor Value ByteString - --- | Computes the absolute value of a tensor. --- --- Given a tensor x, this operation returns a tensor containing --- the absolute value of each element in x. For example, if x is --- an input element and y is an output element, this operation computes --- \(y = |x|\). -abs :: (TensorType t, OneOf '[Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes softmax activations. --- --- For each batch i and class j we have --- --- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) -softmax :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Reverses specific dimensions of a tensor. 
--- --- Given a tensor, and a int32 tensor axis --- representing the set of dimensions of tensor to reverse. This --- operation reverses each dimension i for which there exists --- j s.t. `axis[j] == i`. --- --- tensor can have up to 8 dimensions. The number of dimensions --- specified in axis may be 0 or more entries. If an index is --- specified more than once, a InvalidArgument error is raised. --- --- For example: --- --- ```prettyprint # tensor t is [[[[ 0, 1, 2, 3], # [ 4, 5, 6, --- 7], # [ 8, 9, 10, 11]], # [[12, 13, 14, 15], # [16, 17, 18, 19], # --- [20, 21, 22, 23]]]] # tensor t shape is [1, 2, 3, 4] --- --- # dims is [3] or dims is -1 reverse(t, dims) ==> --- [[[[ 3, 2, 1, 0], [ 7, 6, 5, 4], [ 11, 10, 9, 8]], [[15, 14, 13, 12], --- [19, 18, 17, 16], [23, 22, 21, 20]]]] --- --- # dims is '[1]' (or dims is '[-3]') reverse(t, dims) --- ==> [[[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23] [[ 0, --- 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]]] --- --- # dims is '[2]' (or dims is '[-2]') reverse(t, dims) --- ==> [[[[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]] [[20, 21, 22, --- 23], [16, 17, 18, 19], [12, 13, 14, 15]]]] ``` -reverseV2 :: (TensorType tidx, OneOf '[Int32, Int64] tidx, TensorType t, OneOf '[Complex Double, Complex Float, Bool, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t - --- | Return a tensor with the same shape and contents as the input tensor --- or value. -identity :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Adds two SparseTensor objects to produce another --- SparseTensor. --- --- The input SparseTensor objects' indices are assumed ordered --- in standard lexicographic order. If this is not the case, before this --- step run SparseReorder to restore index ordering. --- --- By default, if two values sum to zero at some index, the output --- SparseTensor would still include that particular location in --- its index, storing a zero in the corresponding value slot. To override --- this, callers can specify thresh, indicating that if the sum --- has a magnitude strictly smaller than thresh, its --- corresponding value and index would then not be included. In --- particular, `thresh == 0` (default) means everything is kept and --- actual thresholding happens only for a positive value. --- --- In the following shapes, nnz is the count after taking --- thresh into account. -sparseAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType treal, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] treal) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> Tensor v7 treal -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64) - --- | Update '*var' according to the centered RMSProp algorithm. --- --- The centered RMSProp algorithm uses an estimate of the centered second --- moment (i.e., the variance) for normalization, as opposed to regular --- RMSProp, which uses the (uncentered) second moment. This often helps --- with training, but is slightly more expensive in terms of computation --- and memory. --- --- Note that in dense implementation of this algorithm, mg, ms, and mom --- will update even if the grad is zero, but in this sparse --- implementation, mg, ms, and mom will not update in iterations during --- which the grad is zero. 
--- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 --- mean_grad = decay * mean_grad + (1-decay) * gradient Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** --- 2) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -sparseApplyCenteredRMSProp :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 t -> Tensor v10 tindices -> Build (Tensor Ref t) - --- | Add all input tensors element wise. -addN :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => [Tensor v1 t] -> Tensor Value t - --- | Computes offsets of concat inputs within its output. --- --- For example: --- --- ```prettyprint # x is [2, 2, 7] # y is [2, 3, 7] # --- z is [2, 5, 7] concat_offset(2, [x, y, z]) => [0, 0, 0], --- [0, 2, 0], [0, 5, 0] ``` -concatOffset :: Tensor v1 Int32 -> [Tensor v2 Int32] -> [Tensor Value Int32] - --- | Concatenates tensors along one dimension. -concatV2 :: (TensorType t, TensorType tidx, OneOf '[Int32, Int64] tidx) => [Tensor v1 t] -> Tensor v2 tidx -> Tensor Value t - --- | Returns a tensor of zeros with the same shape and type as x. -zerosLike :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Update '*var' according to the centered RMSProp algorithm. --- --- The centered RMSProp algorithm uses an estimate of the centered second --- moment (i.e., the variance) for normalization, as opposed to regular --- RMSProp, which uses the (uncentered) second moment. This often helps --- with training, but is slightly more expensive in terms of computation --- and memory. --- --- Note that in dense implementation of this algorithm, mg, ms, and mom --- will update even if the grad is zero, but in this sparse --- implementation, mg, ms, and mom will not update in iterations during --- which the grad is zero. --- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 --- mean_grad = decay * mean_grad + (1-decay) * gradient --- --- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - --- mean_grad ** 2) --- --- mg <- rho * mg_{t-1} + (1-rho) * grad ms <- rho * ms_{t-1} + --- (1-rho) * grad * grad mom <- momentum * mom_{t-1} + lr * grad / --- sqrt(ms - mg * mg + epsilon) var <- var - mom -applyCenteredRMSProp :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 t -> Build (Tensor Ref t) - --- | Update '*var' according to the RMSProp algorithm. --- --- Note that in dense implementation of this algorithm, ms and mom will --- update even if the grad is zero, but in this sparse implementation, ms --- and mom will not update in iterations during which the grad is zero. 
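The centered RMSProp equations above are per-element arithmetic, so they can be restated as a small pure step function. A sketch with hypothetical names, mirroring those update rules exactly:

```haskell
-- One centered-RMSProp step per the equations above, on scalars.
-- State: (var, mg, ms, mom); hyper-parameters: lr, rho, momentum, eps.
centeredRMSPropStep
    :: Double -> Double -> Double -> Double  -- lr, rho, momentum, eps
    -> (Double, Double, Double, Double)      -- (var, mg, ms, mom)
    -> Double                                -- grad
    -> (Double, Double, Double, Double)
centeredRMSPropStep lr rho momentum eps (var, mg, ms, mom) g =
    (var - mom', mg', ms', mom')
  where
    mg'  = rho * mg + (1 - rho) * g          -- mean (first moment) estimate
    ms'  = rho * ms + (1 - rho) * g * g      -- mean-square (second moment)
    mom' = momentum * mom + lr * g / sqrt (ms' - mg' * mg' + eps)
```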
--- --- mean_square = decay * mean_square + (1-decay) * gradient ** 2 Delta = --- learning_rate * gradient / sqrt(mean_square + epsilon) --- --- ms <- rho * ms_{t-1} + (1-rho) * grad * grad mom <- momentum * --- mom_{t-1} + lr * grad / sqrt(ms + epsilon) var <- var - mom -applyRMSProp :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Build (Tensor Ref t) - --- | Adds a value to the current value of a variable. --- --- Any ReadVariableOp which depends directly or indirectly on this assign --- is guaranteed to see the incremented value or a subsequent newer one. --- --- Outputs the incremented value, which can be used to totally order the --- increments to this variable. -assignAddVariableOp :: (TensorType dtype) => ResourceHandle dtype -> Tensor v2 dtype -> Build (ControlNode) - --- | Update '*var' according to the Adam algorithm. --- --- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) m_t <- --- beta1 * m_{t-1} + (1 - beta1) * g_t v_t <- beta2 * v_{t-1} + (1 - --- beta2) * g_t * g_t variable <- variable - lr_t * m_t / (sqrt(v_t) + --- epsilon) -applyAdam :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 t -> Tensor v10 t -> Build (Tensor Ref t) - --- | Extracts a glimpse from the input tensor. --- --- Returns a set of windows called glimpses extracted at location --- offsets from the input tensor. If the windows only partially --- overlaps the inputs, the non overlapping areas will be filled with --- random noise. --- --- The result is a 4-D tensor of shape `[batch_size, glimpse_height, --- glimpse_width, channels]`. The channels and batch dimensions are the --- same as that of the input tensor. The height and width of the output --- windows are specified in the size parameter. --- --- The argument normalized and centered controls how --- the windows are built: --- ---
--- • If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension.
--- • If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0).
--- • If the coordinates are not normalized they are interpreted as numbers of pixels (a coordinate-conversion sketch follows below).
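To make the three coordinate conventions above concrete, here is a hedged pure sketch converting an offset along one axis to pixel units; the function name and the exact mapping are illustrative assumptions, not the op's code.

```haskell
-- Interpret an extract_glimpse offset along one axis of length `size`,
-- following the three bullets above. Illustrative only.
toPixels :: Bool -> Bool -> Double -> Double -> Double
toPixels normalized centered size x
    | normalized && centered = (x + 1) / 2 * size  -- [-1,1] -> [0,size]
    | normalized             = x * size            -- [0,1]  -> [0,size]
    | otherwise              = x                   -- already in pixels
```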
    -extractGlimpse :: Tensor v1 Float -> Tensor v2 Int32 -> Tensor v3 Float -> Tensor Value Float - --- | Update relevant entries in '*var' and '*accum' according to the --- momentum scheme. --- --- Set use_nesterov = True if you want to use Nesterov momentum. --- --- That is for rows we have grad for, we update var and accum as follows: --- --- accum = accum * momentum + grad var -= lr * accum -sparseApplyMomentum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 tindices -> Tensor v6 t -> Build (Tensor Ref t) - --- | Update '*var' according to the momentum scheme. Set use_nesterov = --- True if you --- --- want to use Nesterov momentum. --- --- accum = accum * momentum + grad var -= lr * accum -applyMomentum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Build (Tensor Ref t) - --- | A queue that produces elements in first-in first-out order. -fIFOQueue :: Build (Tensor Ref ByteString) - --- | Update relevant entries in '*var' according to the Ftrl-proximal --- scheme. --- --- That is for rows we have grad for, we update var, accum and linear as --- follows: accum_new = accum + grad * grad linear += grad + --- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 --- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - --- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new -sparseApplyFtrl :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 tindices -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 t -> Build (Tensor Ref t) - --- | Update entries in '*var' and '*accum' according to the proximal --- adagrad scheme. -sparseApplyAdagradDA :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 tindices -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 Int64 -> Build (Tensor Ref t) - --- | Returns x // y element-wise. --- ---
--- *NOTE*: FloorDiv supports broadcasting. More about broadcasting here.
    -floorDiv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Update '*var' according to the proximal adagrad scheme. -applyAdagradDA :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 Int64 -> Build (Tensor Ref t) - --- | Update '*var' according to the adagrad scheme. --- --- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) -applyAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v3 t -> Tensor v4 t -> Build (Tensor Ref t) - --- | Computes the gradient of the sigmoid of x wrt its input. --- --- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and --- dy is the corresponding input gradient. -sigmoidGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Update '*var' according to the adadelta scheme. --- --- accum = rho() * accum + (1 - rho()) * grad.square(); update = --- (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; --- update_accum = rho() * update_accum + (1 - rho()) * update.square(); --- var -= update; -applyAdadelta :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Build (Tensor Ref t) - --- | Sparse update '*var' as FOBOS algorithm with fixed learning rate. --- --- That is for rows we have grad for, we update var as follows: prox_v = --- var - alpha * grad var = sign(prox_v)/(1+alpha*l2) * --- max{|prox_v|-alpha*l1,0} -sparseApplyProximalGradientDescent :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 tindices -> Build (Tensor Ref t) - --- | Update '*var' as FOBOS algorithm with fixed learning rate. --- --- prox_v = var - alpha * delta var = sign(prox_v)/(1+alpha*l2) * --- max{|prox_v|-alpha*l1,0} -applyProximalGradientDescent :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Build (Tensor Ref t) - --- | Solves systems of linear equations. --- --- Matrix is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices. Rhs is a tensor of shape --- `[..., M, K]`. The output is a tensor shape `[..., M, K]`. If --- adjoint is False then each output matrix satisfies --- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If --- adjoint is True then each output matrix satisfies --- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. -matrixSolve :: (TensorType t, OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Sparse update entries in '*var' and '*accum' according to FOBOS --- algorithm. 
--- --- That is for rows we have grad for, we update var and accum as follows: --- accum += grad * grad prox_v = var prox_v -= lr * grad * (1 / --- sqrt(accum)) var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} -sparseApplyProximalAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 tindices -> Build (Tensor Ref t) - --- | Update '*var' by subtracting alpha * delta from it. -applyGradientDescent :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v2 t -> Tensor v3 t -> Build (Tensor Ref t) - --- | Batch normalization. --- --- This op is deprecated. Prefer `tf.nn.batch_normalization`. -batchNormWithGlobalNormalization :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Bool -> Float -> Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor Value t - --- | Encode strings into web-safe base64 format. --- --- Refer to the following article for more information on base64 format: --- en.wikipedia.orgwikiBase64. Base64 strings may have padding --- with '=' at the end so that the encoded has length multiple of 4. See --- Padding section of the link above. --- --- Web-safe means that the encoder uses - and _ instead of + and /. -encodeBase64 :: Tensor v1 ByteString -> Tensor Value ByteString - --- | Joins the strings in the given list of string tensors into one tensor; --- --- with the given separator (default is an empty separator). -stringJoin :: [Tensor v1 ByteString] -> Tensor Value ByteString - --- | Computes the gradient of the crop_and_resize op wrt the input image --- tensor. -cropAndResizeGradImage :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value t - --- | Computes hyperbolic tangent of x element-wise. -tanh :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Converts each entry in the given tensor to strings. Supports many --- numeric --- --- types and boolean. -asString :: (TensorType t, OneOf '[Complex Float, Bool, Int32, Int64, Int8, Double, Float] t) => Tensor v1 t -> Tensor Value ByteString - --- | Compute the inverse 2-dimensional discrete Fourier Transform over the --- inner-most --- --- 2 dimensions of input. -iFFT2D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) - --- | Concatenates a list of SparseTensor along the specified --- dimension. --- --- Concatenation is with respect to the dense versions of these sparse --- tensors. It is assumed that each input is a SparseTensor --- whose elements are ordered along increasing dimension number. --- --- All inputs' shapes must match, except for the concat dimension. The --- indices, values, and shapes lists must have --- the same length. --- --- The output shape is identical to the inputs', except along the concat --- dimension, where it is the sum of the inputs' sizes along that --- dimension. --- --- The output elements will be resorted to preserve the sort order along --- increasing dimension number. --- --- This op runs in `O(M log M)` time, where M is the total --- number of non-empty values across all inputs. 
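The proximal-Adagrad (FOBOS) update quoted at the top of this hunk is plain per-row arithmetic, so it can be restated as a pure step function. A sketch with illustrative names:

```haskell
-- One sparse proximal-Adagrad (FOBOS) step for a single row, per the
-- update quoted above:
--   accum += g^2
--   prox_v = var - lr * g / sqrt(accum)
--   var'   = sign(prox_v) / (1 + lr*l2) * max (|prox_v| - lr*l1) 0
proximalAdagradStep
    :: Double -> Double -> Double  -- lr, l1, l2
    -> (Double, Double)            -- (var, accum)
    -> Double                      -- grad
    -> (Double, Double)
proximalAdagradStep lr l1 l2 (var, accum) g = (var', accum')
  where
    accum' = accum + g * g
    proxV  = var - lr * g * (1 / sqrt accum')
    var'   = signum proxV / (1 + lr * l2) * max (abs proxV - lr * l1) 0
```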
This is due to the need --- for an internal sort in order to concatenate efficiently across an --- arbitrary dimension. --- --- For example, if `concat_dim = 1` and the inputs are --- --- sp_inputs[0]: shape = [2, 3] [0, 2]: "a" [1, 0]: "b" [1, 1]: "c" --- --- sp_inputs[1]: shape = [2, 4] [0, 1]: "d" [0, 2]: "e" --- --- then the output will be --- --- shape = [2, 7] [0, 2]: "a" [0, 4]: "d" [0, 5]: "e" [1, 0]: "b" [1, 1]: --- "c" --- --- Graphically this is equivalent to doing --- ---
--- [    a] concat [ d e  ] = [    a   d e  ]
--- [b c  ]        [      ]   [b c          ]
    -sparseConcat :: (TensorType t) => Int64 -> [Tensor v1 Int64] -> [Tensor v2 t] -> [Tensor v3 Int64] -> (Tensor Value Int64, Tensor Value t, Tensor Value Int64) - --- | Generate a glob pattern matching all sharded file names. -shardedFilespec :: Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor Value ByteString - --- | Shuffle dimensions of x according to a permutation. --- --- The output y has the same rank as x. The shapes of --- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for --- i in [0, 1, ..., rank(x) - 1]` -transpose :: (TensorType t, TensorType tperm, OneOf '[Int32, Int64] tperm) => Tensor v1 t -> Tensor v2 tperm -> Tensor Value t - --- | Joins a string Tensor across the given dimensions. --- --- Computes the string join across dimensions in the given string Tensor --- of shape `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by --- joining the input strings with the given separator (default: empty --- string). Negative indices are counted backwards from the end, with --- `-1` being equivalent to `n - 1`. Passing an empty --- reduction_indices joins all strings in linear index order and --- outputs a scalar string. --- --- For example: --- --- ``` # tensor a is [["a", "b"], ["c", "d"]] tf.reduce_join(a, --- 0) ==> ["ac", "bd"] tf.reduce_join(a, 1) ==> ["ab", "cd"] --- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] --- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] --- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] --- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] --- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] --- tf.reduce_join(a, [0, 1]) ==> ["acbd"] tf.reduce_join(a, [1, 0]) --- ==> ["abcd"] tf.reduce_join(a, []) ==> ["abcd"] ``` -reduceJoin :: Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor Value ByteString +stringSplit :: Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) +stringSplit' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> (Tensor Build Int64, Tensor Build ByteString, Tensor Build Int64) -- | Converts each string in the input Tensor to its hash mod by a number -- of buckets. @@ -2387,10 +6292,21 @@ reduceJoin :: Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor Value ByteString -- functionality will be deprecated and it's recommended to use -- `tf.string_to_hash_bucket_fast()` or -- `tf.string_to_hash_bucket_strong()`. -stringToHashBucket :: Int64 -> Tensor v1 ByteString -> Tensor Value Int64 +stringToHashBucket :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 +stringToHashBucket' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 --- | Draws samples from a multinomial distribution. -multinomial :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Build (Tensor Value Int64) +-- | Converts each string in the input Tensor to its hash mod by a number +-- of buckets. +-- +-- The hash function is deterministic on the content of the string within +-- the process and will never change. However, it is not suitable for +-- cryptography. This function may be used when CPU time is scarce and +-- inputs are trusted or unimportant. There is a risk of adversaries +-- constructing inputs that all hash to the same bucket. To prevent this +-- problem, use a strong hash function with +-- `tf.string_to_hash_bucket_strong`. 
+stringToHashBucketFast :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 +stringToHashBucketFast' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -- | Converts each string in the input Tensor to its hash mod by a number -- of buckets. @@ -2407,3170 +6323,25 @@ multinomial :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, -- infeasible, to compute inputs that hash to the same bucket. This comes -- at a cost of roughly 4x higher compute time than -- `tf.string_to_hash_bucket_fast`. -stringToHashBucketStrong :: Int64 -> Tensor v1 ByteString -> Tensor Value Int64 - --- | Applies sparse updates to individual values or slices within --- a given --- --- variable according to indices. --- --- ref is a Tensor with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of ref. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` --- --- For example, say we want to update 4 scattered elements to a rank-1 --- tensor to 8 elements. In Python, that update would look like this: --- --- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = --- tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, --- 12]) update = tf.scatter_nd_update(ref, indices, updates) with --- tf.Session() as sess: print sess.run(update) --- --- The resulting update to ref would look like this: --- ---
--- [1, 11, 3, 10, 9, 6, 7, 12]
--- (a pure sketch of this update follows below)
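The Python snippet above translates directly to a pure model of the rank-1 case. This sketch is illustrative only; the name is mine.

```haskell
-- Apply the rank-1 scatter_nd_update from the example above:
-- each index in `ixs` receives the corresponding value from `upds`.
scatterNdUpdateModel :: [Int] -> [a] -> [a] -> [a]
scatterNdUpdateModel ixs upds ref = foldl step ref (zip ixs upds)
  where
    -- replace element i of the accumulator with the update u
    step acc (i, u) = take i acc ++ [u] ++ drop (i + 1) acc

-- scatterNdUpdateModel [4,3,1,7] [9,10,11,12] [1..8]
--   == [1,11,3,10,9,6,7,12]
```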
    --- --- See tf.scatter_nd for more details about how to make updates to --- slices. -scatterNdUpdate :: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t) - --- | Compute gradients for a FakeQuantWithMinMaxVars operation. -fakeQuantWithMinMaxVarsGradient :: Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Float -> Tensor v4 Float -> (Tensor Value Float, Tensor Value Float, Tensor Value Float) - --- | Returns the size of a tensor. --- --- This operation returns an integer representing the number of elements --- in input. --- --- For example: --- --- ```prettyprint # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], --- [4, 4, 4]]]] size(t) ==> 12 ``` -size :: (TensorType t, TensorType out_type, OneOf '[Int32, Int64] out_type) => Tensor v1 t -> Tensor Value out_type - --- | Divides a variable reference by sparse updates. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] /= updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- /= updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. This --- makes it easier to chain operations that need to use the reset value. --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their contributions divide. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. -scatterDiv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t) - --- | Multiplies sparse updates into a variable reference. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] *= updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- *= updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. This --- makes it easier to chain operations that need to use the reset value. --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their contributions multiply. --- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. -scatterMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t) - --- | Copy Host Op. --- --- Performs CPU-to-CPU deep-copying of tensor. --- --- Unlike the Copy Op, this op has HostMemory constraint on its input or --- output. -copyHost :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | A Reader that outputs the entire contents of a file as a value. --- --- To use, enqueue filenames in a Queue. The output of ReaderRead will be --- a filename (key) and the contents of that file (value). -wholeFileReader :: Build (Tensor Ref ByteString) - --- | Read SparseTensors from a SparseTensorsMap and --- concatenate them. --- --- The input sparse_handles must be an int64 matrix of --- shape `[N, 1]` where N is the minibatch size and the rows --- correspond to the output handles of AddSparseToTensorsMap or --- AddManySparseToTensorsMap. 
The ranks of the original --- SparseTensor objects that went into the given input ops must --- all match. When the final SparseTensor is created, it has --- rank one higher than the ranks of the incoming SparseTensor --- objects (they have been concatenated along a new row dimension on the --- left). --- --- The output SparseTensor object's shape values for all --- dimensions but the first are the max across the input --- SparseTensor objects' shape values for the corresponding --- dimensions. Its first shape value is N, the minibatch size. --- --- The input SparseTensor objects' indices are assumed ordered --- in standard lexicographic order. If this is not the case, after this --- step run SparseReorder to restore index ordering. --- --- For example, if the handles represent an input, which is a `[2, 3]` --- matrix representing two original SparseTensor objects: --- --- ``` index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] ``` --- --- and --- --- ``` index = [ 2] [10] values = [4, 5] shape = [30] ``` --- --- then the final SparseTensor will be: --- --- ``` index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] --- shape = [2 50] ``` -takeManySparseFromTensorsMap :: (TensorType dtype) => Tensor v1 Int64 -> Build ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) - --- | Destroys the temporary variable and returns its final value. --- --- Sets output to the value of the Tensor pointed to by ref, --- then destroys the temporary variable called var_name. All --- other uses of ref *must* have executed before this op. This --- is typically achieved by chaining the ref through each assign op, or --- by using control dependencies. --- --- Outputs the final value of the tensor pointed to by ref. -destroyTemporaryVariable :: (TensorType t) => Tensor Ref t -> Build (Tensor Value t) - --- | Update ref by subtracting value from it. --- --- This operation outputs "ref" after the update is done. This makes it --- easier to chain operations that need to use the reset value. -assignSub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v2 t -> Build (Tensor Ref t) - --- | JPEG-encode an image. --- --- image is a 3-D uint8 Tensor of shape `[height, width, --- channels]`. --- --- The attr format can be used to override the color format of --- the encoded output. Values can be: --- ---
--- • `''`: Use a default format based on the number of channels in the image.
--- • grayscale: Output a grayscale JPEG image. The channels dimension of image must be 1.
--- • rgb: Output an RGB JPEG image. The channels dimension of image must be 3.
    --- --- If format is not specified or is the empty string, a default --- format is picked in function of the number of channels in --- image: --- ---
--- • 1: Output a grayscale image.
--- • 3: Output an RGB image.
    -encodeJpeg :: Tensor v1 Word8 -> Tensor Value ByteString - --- | Returns a tensor that may be mutated, but only persists within a --- single step. --- --- This is an experimental op for internal use only and it is possible to --- use this op in unsafe ways. DO NOT USE unless you fully understand the --- risks. --- --- It is the caller's responsibility to ensure that ref is --- eventually passed to a matching DestroyTemporaryVariable op --- after all other uses have completed. --- --- Outputs a ref to the tensor state so it may be read or modified. --- --- E.g. var = state_ops._temporary_variable([1, 2], types.float_) --- var_name = var.op.name var = state_ops.assign(var, [[4.0, 5.0]]) var = --- state_ops.assign_add(var, [[6.0, 7.0]]) final = --- state_ops._destroy_temporary_variable(var, var_name=var_name) -temporaryVariable :: (TensorType dtype) => Shape -> Build (Tensor Ref dtype) - --- | Checks whether a tensor has been initialized. --- --- Outputs boolean scalar indicating whether the tensor has been --- initialized. -isVariableInitialized :: (TensorType dtype) => Tensor Ref dtype -> Build (Tensor Value Bool) - --- | Holds state in the form of a tensor that persists across steps. --- --- Outputs a ref to the tensor state so it may be read or modified. --- TODO(zhifengc/mrry): Adds a pointer to a more detail document about --- sharing states in tensorflow. -variable :: (TensorType dtype) => Shape -> Build (Tensor Ref dtype) - --- | Returns the element-wise min of two SparseTensors. --- --- Assumes the two SparseTensors have the same shape, i.e., no --- broadcasting. -sparseSparseMinimum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> (Tensor Value Int64, Tensor Value t) - --- | Compute the regularized incomplete beta integral \(I_x(a, b)\). --- --- The regularized incomplete beta integral is defined as: --- --- ``` I_x(a, b) = frac{B(x; a, b)}{B(a, b)} ``` where --- --- ``` B(x; a, b) = int_0^x t^{a-1} (1 - t)^{b-1} dt ``` --- --- is the incomplete beta function and \(B(a, b)\) is the *complete* beta --- function. -betainc :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Update ref by assigning value to it. --- --- This operation outputs "ref" after the assignment is done. This makes --- it easier to chain operations that need to use the reset value. -assign :: (TensorType t) => Tensor Ref t -> Tensor v2 t -> Build (Tensor Ref t) - --- | Applies softmax to a batched N-D SparseTensor. --- --- The inputs represent an N-D SparseTensor with logical shape `[..., B, --- C]` (where `N >= 2`), and with indices sorted in the canonical --- lexicographic order. --- --- This op is equivalent to applying the normal `tf.nn.softmax()` to each --- innermost logical submatrix with shape `[B, C]`, but with the catch --- that *the implicitly zero elements do not participate*. Specifically, --- the algorithm is equivalent to the following: --- ---
--- 1. Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension;
--- 2. Masks out the original implicitly-zero locations;
--- 3. Renormalizes the remaining elements.
--- (a pure sketch of these three steps follows below)
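As flagged above, the three steps collapse nicely when the absent entries are treated as truly absent rather than zero. A hedged pure sketch for one logical row; names are illustrative.

```haskell
-- Sparse softmax over one innermost row: only the entries that are
-- actually present participate.
sparseSoftmaxRow :: [Double] -> [Double]
sparseSoftmaxRow present = map (/ total) exps
  where
    exps  = map exp present  -- step 1, on the present entries only
    -- because we started from the present entries, the masking of
    -- step 2 is implicit; dividing by `total` is step 3's renormalization
    total = sum exps

-- sparseSoftmaxRow [1,2] ~= [0.2689, 0.7311]
```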
    --- --- Hence, the SparseTensor result has exactly the same non-zero --- indices and shape. -sparseSoftmax :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor Value t - --- | Adds up a SparseTensor and a dense Tensor, using these special rules: --- ---
--- 1. Broadcasts the dense side to have the same shape as the sparse side, if eligible;
--- 2. Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.
    --- --- By these rules, the result is a logical SparseTensor with exactly the --- same indices and shape, but possibly with different non-zero values. --- The output of this Op is the resultant non-zero values. -sparseDenseCwiseAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t - --- | Returns the truth value of NOT x element-wise. -logicalNot :: Tensor v1 Bool -> Tensor Value Bool - --- | Computes the number of elements in the given queue. -queueSize :: Tensor Ref ByteString -> Build (Tensor Value Int32) - --- | Update relevant entries in '*var' and '*accum' according to the --- adagrad scheme. --- --- That is for rows we have grad for, we update var and accum as follows: --- accum += grad * grad var -= lr * grad * (1 / sqrt(accum)) -sparseApplyAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 tindices -> Build (Tensor Ref t) - --- | Store the input tensor in the state of the current session. -getSessionHandle :: (TensorType t) => Tensor v1 t -> Tensor Value ByteString - --- | Component-wise multiplies a SparseTensor by a dense Tensor. --- --- The output locations corresponding to the implicitly zero elements in --- the sparse tensor will be zero (i.e., will not take up storage space), --- regardless of the contents of the dense tensor (even if it's +/-INF --- and that INF*0 == NaN). --- ---
--- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
    -sparseDenseCwiseMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t - --- | Adds up a SparseTensor and a dense Tensor, producing a --- dense Tensor. --- --- This Op does not require a_indices be sorted in standard --- lexicographic order. -sparseTensorDenseAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tindices -> Tensor v2 t -> Tensor v3 tindices -> Tensor v4 t -> Tensor Value t - --- | Get the value of the tensor specified by its handle. -getSessionTensor :: (TensorType dtype) => Tensor v1 ByteString -> Tensor Value dtype - --- | Reorders a SparseTensor into the canonical, row-major ordering. --- --- Note that by convention, all sparse ops preserve the canonical --- ordering along increasing dimension number. The only time ordering can --- be violated is during manual manipulation of the indices and values --- vectors to add entries. --- --- Reordering does not affect the shape of the SparseTensor. --- --- If the tensor has rank R and N non-empty values, --- input_indices has shape `[N, R]`, input_values has length --- N, and input_shape has length R. -sparseReorder :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> (Tensor Value Int64, Tensor Value t) - --- | Split a SparseTensor into num_split tensors along --- one dimension. --- --- If the `shape[split_dim]` is not an integer multiple of --- num_split. Slices `[0 : shape[split_dim] % num_split]` gets --- one extra dimension. For example, if `split_dim = 1` and `num_split = --- 2` and the input is --- --- input_tensor = shape = [2, 7] [ a d e ] [b c ] --- --- Graphically the output tensors are: --- --- output_tensor[0] = shape = [2, 4] [ a ] [b c ] --- --- output_tensor[1] = shape = [2, 3] [ d e ] [ ] -sparseSplit :: (TensorType t) => Int64 -> Tensor v1 Int64 -> Tensor v2 Int64 -> Tensor v3 t -> Tensor v4 Int64 -> ([Tensor Value Int64], [Tensor Value t], [Tensor Value Int64]) - --- | Pads a tensor with zeros. --- --- This operation pads a input with zeros according to the --- paddings you specify. paddings is an integer tensor --- with shape `[Dn, 2]`, where n is the rank of input. For each --- dimension D of input, `paddings[D, 0]` indicates how many --- zeros to add before the contents of input in that dimension, --- and `paddings[D, 1]` indicates how many zeros to add after the --- contents of input in that dimension. --- --- The padded size of each dimension D of the output is: --- --- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` --- --- For example: --- --- ```prettyprint # t is [[1, 1], [2, 2]] # paddings is --- [[1, 1], [2, 2]] # rank of t is 2 pad(t, paddings) ==> --- [[0, 0, 0, 0, 0, 0] [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, --- 0, 0]] ``` -pad :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t - --- | Converts a sparse representation into a dense tensor. --- --- Builds an array dense with shape output_shape such --- that --- --- ```prettyprint # If sparse_indices is scalar dense[i] = (i == --- sparse_indices ? 
sparse_values : default_value) --- --- # If sparse_indices is a vector, then for each i --- dense[sparse_indices[i]] = sparse_values[i] --- --- # If sparse_indices is an n by d matrix, then for each i in [0, n) --- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = --- sparse_values[i] ``` --- --- All other values in dense are set to default_value. --- If sparse_values is a scalar, all sparse indices are set to --- this single value. --- --- Indices should be sorted in lexicographic order, and indices must not --- contain any repeats. If validate_indices is true, these --- properties are checked during execution. -sparseToDense :: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tindices -> Tensor v2 tindices -> Tensor v3 t -> Tensor v4 t -> Tensor Value t - --- | Multiply SparseTensor (of rank 2) A by dense matrix B. --- --- No validity checking is performed on the indices of A. However, the --- following input format is recommended for optimal behavior: --- --- if adjoint_a == false: A should be sorted in lexicographically --- increasing order. Use SparseReorder if you're not sure. if adjoint_a --- == true: A should be sorted in order of increasing dimension 1 (i.e., --- "column major" order instead of "row major" order). -sparseTensorDenseMatMul :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t - --- | Gradient op for MirrorPad op. This op folds a mirror-padded --- tensor. --- --- This operation folds the padded areas of input by --- MirrorPad according to the paddings you specify. --- paddings must be the same as paddings argument given --- to the corresponding MirrorPad op. --- --- The folded size of each dimension D of the output is: --- --- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` --- --- For example: --- --- ```prettyprint # t is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. # --- paddings is [[0, 1]], [0, 1]]. # mode is SYMMETRIC. --- # rank of t is 2. pad(t, paddings) ==> [[ 1, 5] [11, 28]] --- ``` -mirrorPadGrad :: (TensorType t, TensorType tpaddings, OneOf '[Int32, Int64] tpaddings) => Tensor v1 t -> Tensor v2 tpaddings -> Tensor Value t - --- | Randomly shuffles a tensor along its first dimension. --- --- The tensor is shuffled along dimension 0, such that each `value[j]` is --- mapped to one and only one `output[i]`. For example, a mapping that --- might occur for a 3x2 tensor is: --- --- ```prettyprint [[1, 2], [[5, 6], [3, 4], ==> [1, 2], [5, 6]] [3, --- 4]] ``` -randomShuffle :: (TensorType t) => Tensor v1 t -> Build (Tensor Value t) - --- | Selects elements from t or e, depending on --- condition. --- --- The t, and e tensors must all have the same shape, --- and the output will also have that shape. --- --- The condition tensor must be a scalar if t and --- e are scalars. If t and e are vectors or --- higher rank, then condition must be either a scalar, a vector --- with size matching the first dimension of t, or must have the --- same shape as t. --- --- The condition tensor acts as a mask that chooses, based on --- the value at each element, whether the corresponding element / row in --- the output should be taken from t (if true) or e (if --- false). --- --- If condition is a vector and t and e are --- higher rank matrices, then it chooses which row (outer dimension) to --- copy from t and e. If condition has the --- same shape as t and e, then it chooses which element --- to copy from t and e. 
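The same-shape case of select just described is a three-way zip; the docstring's own worked example below matches this model. A pure, illustrative sketch:

```haskell
-- Element-wise select, as in the docstring: take from t where the
-- condition holds, from e otherwise (same-shape case, flattened).
selectModel :: [Bool] -> [a] -> [a] -> [a]
selectModel = zipWith3 (\c t e -> if c then t else e)

-- selectModel [True, False] [1, 2] [5, 6] == [1, 6]
```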
--- --- For example: --- --- ```prettyprint # condition tensor is [[True, False] # [False, --- True]] # t is [[1, 2], # [3, 4]] # e is [[5, 6], # --- [7, 8]] select(condition, t, e) ==> [[1, 6], [7, 4]] --- --- # condition tensor is [True, False] # t is [[1, 2], --- # [3, 4]] # e is [[5, 6], # [7, 8]] select(condition, t, e) --- ==> [[1, 2], [7, 8]] --- --- ``` -select :: (TensorType t) => Tensor v1 Bool -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | The gradient operator for the SparseAdd op. --- --- The SparseAdd op calculates A + B, where A, B, and the sum are all --- represented as SparseTensor objects. This op takes in the --- upstream gradient w.r.t. non-empty values of the sum, and outputs the --- gradients w.r.t. the non-empty values of A and B. -sparseAddGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor v4 Int64 -> (Tensor Value t, Tensor Value t) - --- | Computes fingerprints of the input strings. -sdcaFprint :: Tensor v1 ByteString -> Tensor Value Int64 -tensorArrayUnpack :: (TensorType t) => Tensor Ref ByteString -> Tensor v2 t -> Tensor v3 Float -> Build (Tensor Value Float) - --- | Produces the average pool of the input tensor for quantized types. -quantizedAvgPool :: (TensorType t, OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v1 t -> Tensor v2 Float -> Tensor v3 Float -> (Tensor Value t, Tensor Value Float, Tensor Value Float) - --- | Adjust the contrast of one or more images. --- --- images is a tensor of at least 3 dimensions. The last 3 --- dimensions are interpreted as `[height, width, channels]`. The other --- dimensions only represent a collection of images, such as `[batch, --- height, width, channels].` --- --- Contrast is adjusted independently for each channel of each image. --- --- For each channel, the Op first computes the mean of the image pixels --- in the channel and then adjusts each component of each pixel to `(x - --- mean) * contrast_factor + mean`. -adjustContrastv2 :: Tensor v1 Float -> Tensor v2 Float -> Tensor Value Float - --- | Gather slices from the variable pointed to by resource --- according to indices. --- --- indices must be an integer tensor of any dimension (usually --- 0-D or 1-D). Produces an output tensor with shape `indices.shape + --- params.shape[1:]` where: --- --- ```python # Scalar indices output[:, ..., :] = params[indices, :, ... --- :] --- --- # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] --- --- # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, --- ..., j], :, ..., :] ``` -resourceGather :: (TensorType dtype, TensorType tindices, OneOf '[Int32, Int64] tindices) => ResourceHandle dtype -> Tensor v2 tindices -> Build (Tensor Value dtype) - --- | Merges summaries. --- --- This op creates a `Summary` protocol buffer that contains the --- union of all the values in the input summaries. --- --- When the Op is run, it reports an InvalidArgument error if --- multiple values in the summaries to merge use the same tag. -mergeSummary :: [Tensor v1 ByteString] -> Tensor Value ByteString - --- | Serialize a SparseTensor into a string 3-vector (1-D --- Tensor) object. -serializeSparse :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor Value ByteString - --- | Training via negative sampling. 
-negTrain :: Int64 -> Tensor Ref Float -> Tensor Ref Float -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor v5 Float -> Build (ControlNode) - --- | Delete the TensorArray from its resource container. This enables --- --- the user to close and release the resource in the middle of a --- step/run. -tensorArrayCloseV2 :: Tensor v1 ByteString -> ControlNode - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -threadUnsafeUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) +stringToHashBucketStrong :: Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 +stringToHashBucketStrong' :: OpParams -> Int64 -> Tensor v'1 ByteString -> Tensor Build Int64 -- | Converts each string in the input Tensor to the specified numeric -- type. -- -- (Note that int32 overflow results in an error while float overflow -- results in a rounded value.) -stringToNumber :: (TensorType out_type, OneOf '[Int32, Float] out_type) => Tensor v1 ByteString -> Tensor Value out_type +stringToNumber :: (OneOf '[Int32, Float] out_type) => Tensor v'1 ByteString -> Tensor Build out_type +stringToNumber' :: (OneOf '[Int32, Float] out_type) => OpParams -> Tensor v'1 ByteString -> Tensor Build out_type --- | Performs beam search decoding on the logits given in input. --- --- A note about the attribute merge_repeated: For the beam search --- decoder, this means that if consecutive entries in a beam are the --- same, only the first of these is emitted. That is, when the top path --- is "A B B B B", "A B" is returned if merge_repeated = True but "A B B --- B B" is returned if merge_repeated = False. -cTCBeamSearchDecoder :: Int64 -> Int64 -> Tensor v1 Float -> Tensor v2 Int32 -> ([Tensor Value Int64], [Tensor Value Int64], [Tensor Value Int64], Tensor Value Float) - --- | Transforms a serialized tensorflow.TensorProto proto into a Tensor. -parseTensor :: (TensorType out_type) => Tensor v1 ByteString -> Tensor Value out_type - --- | Outputs a Summary protocol buffer with images. --- --- The summary has up to max_images summary values containing --- images. The images are built from tensor which must be 4-D --- with shape `[batch_size, height, width, channels]` and where --- channels can be: +-- | Returns x - y element-wise. -- --
      ---
    • 1: tensor is interpreted as Grayscale.
    • ---
    • 3: tensor is interpreted as RGB.
    • ---
    • 4: tensor is interpreted as RGBA.
    • ---
    --- --- The images have the same number of channels as the input tensor. For --- float input, the values are normalized one image at a time to fit in --- the range `[0, 255]`. uint8 values are unchanged. The op uses --- two different normalization algorithms: --- ---
      ---
    • If the input values are all positive, they are rescaled so the --- largest one is 255.
    • ---
    • If any input value is negative, the values are shifted so input --- value 0.0 is at 127. They are then rescaled so that either the --- smallest value is 0, or the largest one is 255.
    • ---
    --- --- The tag argument is a scalar Tensor of type --- string. It is used to build the tag of the summary --- values: --- ---
      ---
    • If max_images is 1, the summary value tag is --- '*tag*/image'.
    • ---
    • If max_images is greater than 1, the summary value tags --- are generated sequentially as '*tag*/image/0', '*tag*/image/1', --- etc.
    • ---
--- The bad_color argument is the color to use in the generated
--- images for non-finite input values. It is a uint8 1-D tensor
--- of length channels. Each element must be in the range `[0,
--- 255]` (It represents the value of a pixel in the output image).
--- Non-finite values in the input tensor are replaced by this tensor in
--- the output image. The default value is the color red.
-imageSummary :: (TensorType t, OneOf '[Word16, Word8, Float] t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor Value ByteString
- 
--- | Returns x / y element-wise for integer types.
---
--- Truncation designates that negative numbers will round fractional
--- quantities toward zero. I.e. -7 / 5 = -1. This matches C semantics but
--- it is different than Python semantics. See FloorDiv for a
--- division function that matches Python Semantics.
---
--- *NOTE*: TruncateDiv supports broadcasting. More about
--- broadcasting here
-truncateDiv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- 
--- | Computes the Cholesky decomposition of one or more square matrices.
---
--- The input is a tensor of shape `[..., M, M]` whose inner-most 2
--- dimensions form square matrices, with the same constraints as the
--- single matrix Cholesky decomposition above. The output is a tensor of
--- the same shape as the input containing the Cholesky decompositions for
--- all input submatrices `[..., :, :]`.
-cholesky :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
-batchMatrixSolveLs :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 Double -> Tensor Value t
- 
--- | Outputs all keys and values in the table.
-lookupTableExport :: (TensorType tkeys, TensorType tvalues) => Tensor Ref ByteString -> Build ((Tensor Value tkeys, Tensor Value tvalues))
-batchSvd :: (TensorType t, OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t, Tensor Value t)
- 
--- | Resize images to size using bicubic interpolation.
---
--- Input images can be of different types but output images are always
--- float.
-resizeBicubic :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value Float
- 
--- | Convert one or more images from HSV to RGB.
---
--- Outputs a tensor of the same shape as the images tensor,
--- containing the RGB value of the pixels. The output is only well
--- defined if the value in images are in `[0,1]`.
---
--- See rgb_to_hsv for a description of the HSV encoding.
-hSVToRGB :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Performs 3D average pooling on the input.
-avgPool3D :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Delete the stack from its resource container.
-stackClose :: Tensor Ref ByteString -> Build (ControlNode)
- 
--- | Assigns a new value to a variable.
---
--- Any ReadVariableOp with a control dependency on this op is guaranteed
--- to return this value or a subsequent newer value of the variable.
-assignVariableOp :: (TensorType dtype) => ResourceHandle dtype -> Tensor v2 dtype -> Build (ControlNode)
- 
--- | Local Response Normalization.
---
--- The 4-D input tensor is treated as a 3-D array of 1-D vectors
--- (along the last dimension), and each vector is normalized
--- independently. Within a given vector, each component is divided by the
--- weighted, squared sum of inputs within depth_radius. In detail,
---
--- sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d +
--- depth_radius + 1] ** 2) output = input / (bias + alpha * sqr_sum) ** beta
---
--- For details, see Krizhevsky et al., ImageNet classification with
--- deep convolutional neural networks (NIPS 2012).
-lRN :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Compute the Hurwitz zeta function \(\zeta(x, q)\).
---
--- The Hurwitz zeta function is defined as:
---
--- ``` \zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x} ```
-zeta :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
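For readers skimming these generated signatures, here is a minimal usage sketch for the zeta entry above. It is not part of the generated docs: it assumes the README-style session API (runSession, run, vector) from TensorFlow.Core and TensorFlow.Ops alongside TensorFlow.GenOps.Core, and the exact module layout and Value/Build result types vary between versions of these bindings.

```haskell
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (zeta)
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = TF.runSession $ do
    -- zeta(2, 1) is the ordinary Riemann zeta(2) = pi^2/6 ~ 1.6449.
    let x = TF.vector [2 :: Float]
        q = TF.vector [1 :: Float]
    result <- TF.run (TF.zeta x q)
    liftIO $ print (result :: V.Vector Float)  -- roughly [1.6449]
```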
- 
--- | Creates a TensorArray for storing the gradients of values in the given
--- handle.
---
--- If the given TensorArray gradient already exists, returns a reference
--- to it.
---
--- Locks the size of the original TensorArray by disabling its dynamic
--- size flag.
---
--- *A note about the input flow_in:*
---
--- The handle flow_in forces the execution of the gradient lookup to
--- occur only after certain other operations have occurred. For example,
--- when the forward TensorArray is dynamically sized, writes to this
--- TensorArray may resize the object. The gradient TensorArray is
--- statically sized based on the size of the forward TensorArray when
--- this operation executes. Furthermore, the size of the forward
--- TensorArray is frozen by this call. As a result, the flow is used to
--- ensure that the call to generate the gradient TensorArray only happens
--- after all writes are executed.
---
--- In the case of dynamically sized TensorArrays, gradient computation
--- should only be performed on read operations that have themselves been
--- chained via flow to occur only after all writes have executed. That
--- way the final size of the forward TensorArray is known when this
--- operation is called.
---
--- *A note about the source attribute:*
---
--- TensorArray gradient calls use an accumulator TensorArray object. If
--- multiple gradients are calculated and run in the same session, the
--- multiple gradient nodes may accidentally flow through the same
--- accumulator TensorArray. This double counts and generally breaks the
--- TensorArray gradient flow.
---
--- The solution is to identify which gradient call this particular
--- TensorArray gradient is being called in. This is performed by
--- identifying a unique string (e.g. "gradients", "gradients_1", ...)
--- from the input gradient Tensor's name. This string is used as a suffix
--- when creating the TensorArray gradient object here (the attribute
--- source).
---
--- The attribute source is added as a suffix to the forward
--- TensorArray's name when performing the creation / lookup, so that each
--- separate gradient calculation gets its own TensorArray accumulator.
-tensorArrayGradV2 :: Tensor v1 ByteString -> Tensor v2 Float -> Build (Tensor Value ByteString)
- 
--- | Cast x of type SrcT to y of DstT.
-cast :: (TensorType srcT, TensorType dstT) => Tensor v1 srcT -> Tensor Value dstT
- 
--- | Computes the Gauss error function of x element-wise.
-erf :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t
-batchMatrixTriangularSolve :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- 
--- | Adds sparse updates to the variable referenced by resource.
---
--- This operation computes
---
--- # Scalar indices ref[indices, ...] += updates[...]
---
--- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...]
---
--- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...]
--- += updates[i, ..., j, ...]
---
--- Duplicate entries are handled correctly: if multiple indices
--- reference the same location, their contributions add.
---
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
---
--- (figure: images/ScatterAdd.png)
-resourceScatterAdd :: (TensorType dtype, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype, TensorType tindices, OneOf '[Int32, Int64] tindices) => ResourceHandle dtype -> Tensor v2 tindices -> Tensor v3 dtype -> Build (ControlNode)
-batchCholeskyGrad :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
-batchMatrixInverse :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Return the same ref tensor as the input ref tensor.
-refIdentity :: (TensorType t) => Tensor Ref t -> Build (Tensor Ref t)
- 
--- | Computes the singular value decompositions of one or more matrices.
---
--- Computes the SVD of each inner matrix in input such that
--- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
--- transpose(v[..., :, :])`
---
--- ```prettyprint
--- # a is a tensor containing a batch of matrices.
--- # s is a tensor of singular values for each matrix.
--- # u is the tensor containing of left singular vectors for each matrix.
--- # v is the tensor containing of right singular vectors for each matrix.
--- s, u, v = svd(a)
--- s, _, _ = svd(a, compute_uv=False)
--- ```
-svd :: (TensorType t, OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t, Tensor Value t)
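The cast entry a few items above is the usual way to convert element types in these bindings. A hedged sketch under the same assumed session API as the previous example (cast from float truncates toward zero in TensorFlow):

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (cast)
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = TF.runSession $ do
    -- The destination type DstT is picked by the fetch annotation below.
    let xs = TF.vector [1.8, -2.5 :: Float]
    result <- TF.run (TF.cast xs)
    liftIO $ print (result :: V.Vector Int32)  -- expected: [1,-2]
```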
- 
--- | Solves one or more linear least-squares problems.
---
--- matrix is a tensor of shape `[..., M, N]` whose inner-most 2
--- dimensions form matrices of size `[M, N]`. Rhs is a tensor of shape
--- `[..., M, K]`. The output is a tensor of shape `[..., N, K]` where each
--- output matrix solves each of the equations matrix[..., :, :] *
--- output[..., :, :] = rhs[..., :, :] in the least squares sense.
---
--- Below we use the following notation for each pair of matrix and
--- right-hand sides in the batch:
---
--- matrix=\(A \in \Re^{m \times n}\), rhs=\(B \in \Re^{m \times k}\),
--- output=\(X \in \Re^{n \times k}\), l2_regularizer=\(\lambda\).
---
--- If fast is True, then the solution is computed by
--- solving the normal equations using Cholesky decomposition.
--- Specifically, if \(m \ge n\) then \(X = (A^T A + \lambda I)^{-1} A^T
--- B\), which solves the least-squares problem \(X = \mathrm{argmin}_{Z \in
--- \Re^{n \times k}} ||A Z - B||_F^2 + \lambda ||Z||_F^2\). If \(m < n\)
--- then output is computed as \(X = A^T (A A^T + \lambda I)^{-1}
--- B\), which (for \(\lambda = 0\)) is the minimum-norm solution to the
--- under-determined linear system, i.e. \(X = \mathrm{argmin}_{Z \in \Re^{n
--- \times k}} ||Z||_F^2\), subject to \(A Z = B\). Notice that the fast
--- path is only numerically stable when \(A\) is numerically full rank
--- and has a condition number \(\mathrm{cond}(A) <
--- \frac{1}{\sqrt{\epsilon_{mach}}}\) or \(\lambda\) is sufficiently large.
---
--- If fast is False an algorithm based on the numerically
--- robust complete orthogonal decomposition is used. This computes the
--- minimum-norm least-squares solution, even when \(A\) is rank
--- deficient. This path is typically 6-7 times slower than the fast path.
--- If fast is False then l2_regularizer is ignored.
-matrixSolveLs :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 Double -> Tensor Value t
- 
--- | Packs a list of N rank-R tensors into one
--- rank-`(R+1)` tensor.
---
--- Packs the N tensors in values into a tensor with
--- rank one higher than each tensor in values, by packing them
--- along the axis dimension. Given a list of tensors of shape
--- `(A, B, C)`;
---
--- if `axis == 0` then the output tensor will have the shape
--- `(N, A, B, C)`. if `axis == 1` then the output tensor will
--- have the shape `(A, N, B, C)`. Etc.
---
--- For example:
---
--- ```prettyprint
--- # x is [1, 4]
--- # y is [2, 5]
--- # z is [3, 6]
--- pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
--- pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
--- ```
---
--- This is the opposite of unpack.
-pack :: (TensorType t) => [Tensor v1 t] -> Tensor Value t
- 
--- | Closes the given barrier.
---
--- This operation signals that no more new elements will be inserted in
--- the given barrier. Subsequent InsertMany that try to introduce a new
--- key will fail. Subsequent InsertMany operations that just add missing
--- components to already existing elements will continue to succeed.
--- Subsequent TakeMany operations will continue to succeed if sufficient
--- completed elements remain in the barrier. Subsequent TakeMany
--- operations that would block will fail immediately.
-barrierClose :: Tensor Ref ByteString -> Build (ControlNode)
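The pack entry above carries a prettyprint example; here is a hedged Haskell transcription of the same stacking, under the session-API assumptions stated earlier (fetched tensors come back as a flat, row-major vector):

```haskell
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (pack)
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = TF.runSession $ do
    -- pack [x, y, z] stacks three shape-[2] vectors into a [3, 2] tensor.
    let x = TF.vector [1, 4 :: Float]
        y = TF.vector [2, 5 :: Float]
        z = TF.vector [3, 6 :: Float]
    result <- TF.run (TF.pack [x, y, z])
    liftIO $ print (result :: V.Vector Float)  -- expected: [1,4,2,5,3,6]
```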
- 
--- | Computes the eigen decomposition of one or more square self-adjoint
--- matrices.
---
--- Computes the eigenvalues and (optionally) eigenvectors of each inner
--- matrix in input such that `input[..., :, :] = v[..., :, :] *
--- diag(e[..., :])`.
---
--- ```prettyprint
--- # a is a tensor.
--- # e is a tensor of eigenvalues.
--- # v is a tensor of eigenvectors.
--- e, v = self_adjoint_eig(a)
--- e = self_adjoint_eig(a, compute_v=False)
--- ```
-selfAdjointEigV2 :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t)
- 
--- | Subtracts sparse updates from a variable reference.
---
--- # Scalar indices ref[indices, ...] -= updates[...]
---
--- # Vector indices (for each i) ref[indices[i], ...] -= updates[i, ...]
---
--- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...]
--- -= updates[i, ..., j, ...]
---
--- This operation outputs ref after the update is done. This
--- makes it easier to chain operations that need to use the reset value.
---
--- Duplicate entries are handled correctly: if multiple indices
--- reference the same location, their (negated) contributions add.
---
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
---
--- (figure: images/ScatterSub.png)
-scatterSub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t)
- 
--- | Computes the Eigen Decomposition of a batch of square self-adjoint
--- matrices.
---
--- The input is a tensor of shape `[..., M, M]` whose inner-most 2
--- dimensions form square matrices, with the same constraints as the
--- single matrix SelfAdjointEig.
---
--- The result is a [..., M+1, M] matrix with [..., 0,:] containing the
--- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
-selfAdjointEig :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Stops gradient computation.
---
--- When executed in a graph, this op outputs its input tensor as-is.
---
--- When building ops to compute gradients, this op prevents the
--- contribution of its inputs to be taken into account. Normally, the
--- gradient generator adds ops to a graph to compute the derivatives of a
--- specified loss by recursively finding out inputs that
--- contributed to its computation. If you insert this op in the graph its
--- inputs are masked from the gradient generator. They are not taken into
--- account for computing gradients.
---
--- This is useful any time you want to compute a value with TensorFlow
--- but need to pretend that the value was a constant. Some examples
--- include:
      ---
    • The *EM* algorithm where the *M-step* should not involve --- backpropagation through the output of the *E-step*.
    • ---
    • Contrastive divergence training of Boltzmann machines where, when --- differentiating the energy function, the training must not --- backpropagate through the graph that generated the samples from the --- model.
    • ---
    • Adversarial training, where no backprop should happen through the --- adversarial example generation process.
    • ---
    -stopGradient :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Returns the index with the largest value across dimensions of a --- tensor. -argMax :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value Int64 - --- | Computes the reverse mode backpropagated gradient of the Cholesky --- algorithm. --- --- For an explanation see "Differentiation of the Cholesky algorithm" by --- Iain Murray http://arxiv.org/abs/1602.07527. -choleskyGrad :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Reshapes a SparseTensor to represent values in a new dense shape. --- --- This operation has the same semantics as reshape on the represented --- dense tensor. The input_indices are recomputed based on the --- requested new_shape. --- --- If one component of new_shape is the special value -1, the --- size of that dimension is computed so that the total dense size --- remains constant. At most one component of new_shape can be --- -1. The number of dense elements implied by new_shape must be --- the same as the number of dense elements originally implied by --- input_shape. --- --- Reshaping does not affect the order of values in the SparseTensor. --- --- If the input tensor has rank R_in and N non-empty --- values, and new_shape has length R_out, then --- input_indices has shape `[N, R_in]`, input_shape has --- length R_in, output_indices has shape `[N, R_out]`, --- and output_shape has length R_out. -sparseReshape :: Tensor v1 Int64 -> Tensor v2 Int64 -> Tensor v3 Int64 -> (Tensor Value Int64, Tensor Value Int64) - --- | var: Should be from a Variable(). -sparseApplyAdadelta :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 tindices -> Build (Tensor Ref t) - --- | Computes the gradient of morphological 2-D dilation with respect to --- the filter. -dilation2DBackpropFilter :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t -batchSelfAdjointEigV2 :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value t) - --- | Computes the number of incomplete elements in the given barrier. -barrierIncompleteSize :: Tensor Ref ByteString -> Build (Tensor Value Int32) - --- | Fake-quantize the inputs tensor of type float and shape `[b, --- h, w, d]` via --- --- global float scalars min and max to outputs --- tensor of same shape as inputs. --- ---
      ---
    • min; max is the clamping range for the inputs --- data. Op divides this range into 255 steps (total of 256 values), then --- replaces each inputs value with the closest of the quantized --- step values.
    • ---
    --- --- This operation has a gradient and thus allows for training min --- and max values. -fakeQuantWithMinMaxVars :: Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Float -> Tensor Value Float - --- | Reads the value of a variable. --- --- The tensor returned by this operation is immutable. --- --- The value returned by this operation is guaranteed to be influenced by --- all the writes on which this operation depends directly or indirectly, --- and to not be influenced by any of the writes which depend directly or --- indirectly on this operation. -readVariableOp :: (TensorType dtype) => ResourceHandle dtype -> Build (Tensor Value dtype) - --- | Gradient for batch normalization. --- --- Note that the size of 4D Tensors are defined by either NHWC or --- NCHW. The size of 1D Tensors matches the dimension C of the 4D --- Tensors. -fusedBatchNormGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> (Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t) - --- | A queue that produces elements in first-in first-out order. --- --- Variable-size shapes are allowed by setting the corresponding shape --- dimensions to 0 in the shape attr. In this case DequeueMany will pad --- up to the maximum size of any given element in the minibatch. See --- below for details. -paddingFIFOQueue :: Build (Tensor Ref ByteString) - --- | Computes the inverse of one or more square invertible matrices or --- their --- --- adjoints (conjugate transposes). --- --- The input is a tensor of shape `[..., M, M]` whose inner-most 2 --- dimensions form square matrices. The output is a tensor of the same --- shape as the input containing the inverse for all input submatrices --- `[..., :, :]`. --- --- The op uses LU decomposition with partial pivoting to compute the --- inverses. --- --- If a matrix is not invertible there is no guarantee what the op does. --- It may detect the condition and raise an exception or it may simply --- return a garbage result. -matrixInverse :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Outputs a Summary protocol buffer with audio. --- --- The summary has up to max_outputs summary values containing --- audio. The audio is built from tensor which must be 3-D with --- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, --- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` --- with a sample rate of sample_rate. --- --- The tag argument is a scalar Tensor of type --- string. It is used to build the tag of the summary --- values: --- ---
      ---
    • If max_outputs is 1, the summary value tag is --- '*tag*/audio'.
    • ---
    • If max_outputs is greater than 1, the summary value tags --- are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', --- etc.
    • ---
-audioSummaryV2 :: Tensor v1 ByteString -> Tensor v2 Float -> Tensor v3 Float -> Tensor Value ByteString
- 
--- | Computes the determinant of one or more square matrices.
---
--- The input is a tensor of shape `[..., M, M]` whose inner-most 2
--- dimensions form square matrices. The output is a tensor containing the
--- determinants for all input submatrices `[..., :, :]`.
-matrixDeterminant :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Writes contents to the file at input filename. Creates file if not
--- existing.
-writeFile :: Tensor v1 ByteString -> Tensor v2 ByteString -> ControlNode
- 
--- | Concatenates quantized tensors along one dimension.
-quantizedConcat :: (TensorType t) => Tensor v1 Int32 -> [Tensor v2 t] -> [Tensor v3 Float] -> [Tensor v4 Float] -> (Tensor Value t, Tensor Value Float, Tensor Value Float)
- 
--- | Creates a handle to a Variable resource.
-varHandleOp :: (TensorType dtype) => Shape -> Build (ResourceHandle dtype)
- 
--- | Assign value to the sliced l-value reference of ref.
---
--- The values of value are assigned to the positions in the
--- variable ref that are selected by the slice parameters. The
--- slice parameters begin, end, strides, etc. work
--- exactly as in StridedSlice.
---
--- NOTE this op currently does not support broadcasting and so
--- value's shape must be exactly the shape produced by the slice
--- of ref.
-stridedSliceAssign :: (TensorType t, TensorType index, OneOf '[Int32, Int64] index) => Tensor Ref t -> Tensor v2 index -> Tensor v3 index -> Tensor v4 index -> Tensor v5 t -> Build (Tensor Ref t)
- 
--- | Checks whether a resource handle-based variable has been initialized.
-varIsInitializedOp :: ResourceHandle dtype -> Build (Tensor Value Bool)
- 
--- | Update '*var*' according to the RMSProp algorithm.
---
--- Note that in dense implementation of this algorithm, ms and mom will
--- update even if the grad is zero, but in this sparse implementation, ms
--- and mom will not update in iterations during which the grad is zero.
---
--- mean_square = decay * mean_square + (1-decay) * gradient ** 2
--- Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
---
--- ms <- rho * ms_{t-1} + (1-rho) * grad * grad
--- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
--- var <- var - mom
-sparseApplyRMSProp :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Tensor v9 tindices -> Build (Tensor Ref t)
-batchCholesky :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
-tensorArrayGather :: (TensorType dtype) => Tensor Ref ByteString -> Tensor v2 Int32 -> Tensor v3 Float -> Build (Tensor Value dtype)
- 
--- | Restore a reader to a previously saved state.
---
--- Not all Readers support being restored, so this can produce an
--- Unimplemented error.
-readerRestoreState :: Tensor Ref ByteString -> Tensor v2 ByteString -> Build (ControlNode)
- 
--- | Computes the gradient for the sqrt of x wrt its input.
---
--- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and
--- dy is the corresponding input gradient.
-sqrtGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
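As a worked check of the matrixDeterminant entry above, a minimal hedged sketch under the same assumed session API (TF.constant and TF.Shape are from TensorFlow.Ops/TensorFlow.Core as in the library README):

```haskell
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (matrixDeterminant)
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = TF.runSession $ do
    -- det [[1,2],[3,4]] = 1*4 - 2*3 = -2.
    let m = TF.constant (TF.Shape [2, 2]) [1, 2, 3, 4 :: Float]
    result <- TF.run (TF.matrixDeterminant m)
    liftIO $ print (result :: V.Vector Float)  -- expected: [-2.0]
```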
- 
--- | Splits a tensor into num_split tensors along one dimension.
-split :: (TensorType t) => Int64 -> Tensor v1 Int32 -> Tensor v2 t -> [Tensor Value t]
- 
--- | A Reader that outputs the lines of a file delimited by '\n'.
-textLineReader :: Build (Tensor Ref ByteString)
- 
--- | Copy a tensor setting everything outside a central band in each
--- innermost matrix to zero.
---
--- The band part is computed as follows: Assume input
--- has k dimensions `[I, J, K, ..., M, N]`, then the output is a
--- tensor with the same shape where
---
--- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
---
--- The indicator function
---
--- `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
--- (num_upper < 0 || (n-m) <= num_upper)`.
---
--- For example:
---
--- ```prettyprint
--- # if input is [[ 0,  1,  2, 3]
--- #              [-1,  0,  1, 2]
--- #              [-2, -1,  0, 1]
--- #              [-3, -2, -1, 0]],
---
--- tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
---                                        [-1,  0,  1, 2]
---                                        [ 0, -1,  0, 1]
---                                        [ 0,  0, -1, 0]],
---
--- tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
---                                       [-1,  0,  1, 0]
---                                       [-2, -1,  0, 1]
---                                       [ 0, -2, -1, 0]]
--- ```
---
--- Useful special cases:
---
--- ```prettyprint
--- tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
--- tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
--- tf.matrix_band_part(input, 0, 0)  ==> Diagonal.
--- ```
-matrixBandPart :: (TensorType t) => Tensor v1 t -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor Value t
- 
--- | Closes the given queue.
---
--- This operation signals that no more elements will be enqueued in the
--- given queue. Subsequent Enqueue(Many) operations will fail. Subsequent
--- Dequeue(Many) operations will continue to succeed if sufficient
--- elements remain in the queue. Subsequent Dequeue(Many) operations that
--- would block will fail immediately.
-queueClose :: Tensor Ref ByteString -> Build (ControlNode)
- 
--- | V2 format specific: merges the metadata files of sharded checkpoints.
---
--- The result is one logical checkpoint, with one physical metadata file
--- and renamed data files.
---
--- Intended for "grouping" multiple checkpoints in a sharded checkpoint
--- setup.
---
--- If delete_old_dirs is true, attempts to delete recursively the dirname
--- of each path in the input checkpoint_prefixes. This is useful when
--- those paths are non user-facing temporary locations.
-mergeV2Checkpoints :: Tensor v1 ByteString -> Tensor v2 ByteString -> ControlNode
- 
--- | Computes the number of complete elements in the given barrier.
-barrierReadySize :: Tensor Ref ByteString -> Build (Tensor Value Int32)
- 
--- | A queue that randomizes the order of elements.
-randomShuffleQueue :: Build (Tensor Ref ByteString)
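The matrixBandPart entry above maps directly onto the special cases listed in its comment. A hedged sketch of the upper-triangular case, under the same assumed session API:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (matrixBandPart)
import qualified TensorFlow.Ops as TF (constant, scalar)

main :: IO ()
main = TF.runSession $ do
    let m        = TF.constant (TF.Shape [3, 3]) [1..9 :: Float]
        numLower = TF.scalar (0 :: Int64)   -- keep no subdiagonals
        numUpper = TF.scalar (-1 :: Int64)  -- keep every superdiagonal
    -- Equivalent to tf.matrix_band_part(input, 0, -1): the upper-triangular part.
    result <- TF.run (TF.matrixBandPart m numLower numUpper)
    liftIO $ print (result :: V.Vector Float)  -- expected: [1,2,3,0,5,6,0,0,9]
```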
      ---
    • NOTE*: NotEqual supports broadcasting. More about --- broadcasting here
    • ---
-notEqual :: (TensorType t, OneOf '[Complex Double, Complex Float, Bool, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool
- 
--- | Greedily selects a subset of bounding boxes in descending order of
--- score, pruning away boxes that have high intersection-over-union (IOU)
--- overlap with previously selected boxes. Bounding boxes are supplied as
--- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of
--- any diagonal pair of box corners and the coordinates can be provided
--- as normalized (i.e., lying in the interval [0, 1]) or absolute. Note
--- that this algorithm is agnostic to where the origin is in the
--- coordinate system. Note that this algorithm is invariant to orthogonal
--- transformations and translations of the coordinate system; thus
--- translations or reflections of the coordinate system result in the same
--- boxes being selected by the algorithm.
---
--- The output of this operation is a set of integers indexing into the
--- input collection of bounding boxes representing the selected boxes.
--- The bounding box coordinates corresponding to the selected indices can
--- then be obtained using the `tf.gather` operation. For example:
---
--- selected_indices = tf.image.non_max_suppression(
---     boxes, scores, max_output_size, iou_threshold)
--- selected_boxes = tf.gather(boxes, selected_indices)
-nonMaxSuppression :: Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Int32 -> Tensor Value Int32
-tensorArrayWrite :: (TensorType t) => Tensor Ref ByteString -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 Float -> Build (Tensor Value Float)
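A hedged sketch of the nonMaxSuppression entry above, under the same assumed session API; the IOU threshold is an attr not visible in this signature, so this sketch additionally assumes its default (0.5 in the TensorFlow op registry):

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (nonMaxSuppression)
import qualified TensorFlow.Ops as TF (constant, scalar, vector)

main :: IO ()
main = TF.runSession $ do
    -- Boxes 0 and 1 overlap heavily (IoU ~ 0.82); box 2 is disjoint.
    let boxes  = TF.constant (TF.Shape [3, 4])
                     [ 0.0, 0.0, 1.0, 1.0
                     , 0.0, 0.1, 1.0, 1.1
                     , 0.0, 2.0, 1.0, 3.0 :: Float ]
        scores = TF.vector [0.9, 0.8, 0.7 :: Float]
        maxOut = TF.scalar (3 :: Int32)
    selected <- TF.run (TF.nonMaxSuppression boxes scores maxOut)
    -- The lower-scoring overlapping box should be pruned.
    liftIO $ print (selected :: V.Vector Int32)  -- expected: [0,2]
```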
      ---
    1. m = max(abs(input_min), abs(input_max)) if range_given is --- true,
    2. ---
    3. m = max(max(abs(min_elem(input)), abs(max_elem(input))) --- otherwise.
    4. ---
    --- --- Our input tensor range is then [-m, m]. --- --- Next, we choose our fixed-point quantization buckets, [min_fixed, --- max_fixed]. If signed_input is true, this is --- ---
      ---
    • min_fixed, max_fixed =
    • ---
    • -(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - --- 1 .
    • ---
    --- --- Otherwise, if signed_input is false, the fixed-point range is --- ---
      ---
    • min_fixed, max_fixed = [0, (1 << num_bits) - 1].
    • ---
    --- --- From this we compute our scaling factor, s: --- --- s = (max_fixed - min_fixed) / (2 * m). --- --- Now we can quantize and dequantize the elements of our tensor. An --- element e is transformed into e': --- --- e' = (e * s).round_to_nearest() / s. --- --- Note that we have a different number of buckets in the signed vs. --- unsigned cases. For example, if num_bits == 8, we get 254 buckets in --- the signed case vs. 255 in the unsigned case. --- --- For example, suppose num_bits = 8 and m = 1. Then --- ---
      ---
    • min_fixed, max_fixed = [-127, 127], and s = (127 + 127) / 2 --- = 127.
    • ---
--- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to {-127, -63,
--- 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
-quantizeAndDequantize :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Returns the next record (key, value pair) produced by a Reader.
---
--- Will dequeue from the input queue if necessary (e.g. when the Reader
--- needs to start reading from a new file since it has finished with the
--- previous file).
-readerRead :: Tensor Ref ByteString -> Tensor Ref ByteString -> Build ((Tensor Value ByteString, Tensor Value ByteString))
- 
--- | Solves systems of linear equations with upper or lower triangular
--- matrices by backsubstitution.
---
--- matrix is a tensor of shape `[..., M, M]` whose inner-most 2
--- dimensions form square matrices. If lower is True then
--- the strictly upper triangular part of each inner-most matrix is
--- assumed to be zero and not accessed. If lower is False then
--- the strictly lower triangular part of each inner-most matrix is
--- assumed to be zero and not accessed. rhs is a tensor of shape
--- `[..., M, K]`.
---
--- The output is a tensor of shape `[..., M, K]`. If adjoint is
--- True then the innermost matrices in output satisfy matrix
--- equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If
--- adjoint is False then the innermost matrices in output satisfy
--- matrix equations
--- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
-matrixTriangularSolve :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- 
--- | Split the data from the input value into TensorArray elements.
---
--- Assuming that lengths takes on values
---
--- ```(n0, n1, ..., n(T-1))```
---
--- and that value has shape
---
--- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
---
--- this splits values into a TensorArray with T tensors.
---
--- TensorArray index t will be the subtensor of values with starting
--- position
---
--- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
---
--- and having size
---
--- ```nt x d0 x d1 x ...```
-tensorArraySplitV2 :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Float -> Tensor Value Float
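For the matrixTriangularSolve entry above, a hedged worked sketch under the same assumed session API; it also assumes the op's default attrs (lower = True, adjoint = False), which are not visible in this signature:

```haskell
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (matrixTriangularSolve)
import qualified TensorFlow.Ops as TF (constant)

main :: IO ()
main = TF.runSession $ do
    -- Solve L x = b for lower-triangular L = [[2,0],[1,3]] and b = [2,7]^T.
    let l   = TF.constant (TF.Shape [2, 2]) [2, 0, 1, 3 :: Float]
        rhs = TF.constant (TF.Shape [2, 1]) [2, 7 :: Float]
    x <- TF.run (TF.matrixTriangularSolve l rhs)
    -- Forward substitution: x1 = 2/2 = 1, x2 = (7 - 1*1)/3 = 2.
    liftIO $ print (x :: V.Vector Float)  -- expected: [1.0,2.0]
```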
- 
--- | Restores a tensor from checkpoint files.
---
--- Reads a tensor stored in one or several files. If there are several
--- files (for instance because a tensor was saved as slices),
--- file_pattern may contain wildcard symbols (* and
--- ?) in the filename portion only, not in the directory
--- portion.
---
--- If a file_pattern matches several files,
--- preferred_shard can be used to hint in which file the
--- requested tensor is likely to be found. This op will first open the
--- file at index preferred_shard in the list of matching files
--- and try to restore tensors from that file. Only if some tensors or
--- tensor slices are not found in that first file, then the Op opens all
--- the files. Setting preferred_shard to match the value passed
--- as the shard input of a matching Save Op may speed
--- up Restore. This attribute only affects performance, not correctness.
--- The default value -1 means files are processed in order.
---
--- See also RestoreSlice.
-restore :: (TensorType dt) => Tensor v1 ByteString -> Tensor v2 ByteString -> Tensor Value dt
- 
--- | Computes Quantized Rectified Linear X: `min(max(features, 0),
--- max_value)`
-quantizedReluX :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v1 tinput -> Tensor v2 Float -> Tensor v3 Float -> Tensor v4 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float)
- 
--- | Extracts the average gradient in the given ConditionalAccumulator,
--- provided that sufficient (i.e., more than num_required) gradients have
--- been accumulated. The op blocks until sufficient gradients have been
--- accumulated. If the accumulator has already aggregated more than
--- num_required gradients, it returns the average of the accumulated
--- gradients. Also automatically increments the recorded global_step in
--- the accumulator by 1, and resets the aggregate to 0.
-accumulatorTakeGradient :: (TensorType dtype, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v2 Int32 -> Build (Tensor Value dtype)
- 
--- | Returns element-wise remainder of division. When `x < 0` xor `y < 0`
--- is true, this follows Python semantics in that the result here is
--- consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y)
--- = x`.
      ---
    • NOTE*: FloorMod supports broadcasting. More about --- broadcasting here
    • ---
    -floorMod :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns the set of files matching a pattern. --- --- Note that this routine only supports wildcard characters in the --- basename portion of the pattern, not in the directory portion. -matchingFiles :: Tensor v1 ByteString -> Tensor Value ByteString - --- | Performs max pooling on the input. -maxPool :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes the ids of the positions in sampled_candidates that match --- true_labels. --- --- When doing log-odds NCE, the result of this op should be passed --- through a SparseToDense op, then added to the logits of the sampled --- candidates. This has the effect of removing the sampled --- labels that match the true labels by making the classifier sure that --- they are sampled labels. -computeAccidentalHits :: Int64 -> Tensor v1 Int64 -> Tensor v2 Int64 -> (Tensor Value Int32, Tensor Value Int64, Tensor Value Float) - --- | Deserialize and concatenate SparseTensors from a serialized --- minibatch. --- --- The input serialized_sparse must be a string matrix of shape --- `[N x 3]` where N is the minibatch size and the rows --- correspond to packed outputs of SerializeSparse. The ranks of --- the original SparseTensor objects must all match. When the --- final SparseTensor is created, it has rank one higher than --- the ranks of the incoming SparseTensor objects (they have --- been concatenated along a new row dimension). --- --- The output SparseTensor object's shape values for all --- dimensions but the first are the max across the input --- SparseTensor objects' shape values for the corresponding --- dimensions. Its first shape value is N, the minibatch size. --- --- The input SparseTensor objects' indices are assumed ordered --- in standard lexicographic order. If this is not the case, after this --- step run SparseReorder to restore index ordering. --- --- For example, if the serialized input is a `[2 x 3]` matrix --- representing two original SparseTensor objects: --- --- index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] --- --- and --- --- index = [ 2] [10] values = [4, 5] shape = [30] --- --- then the final deserialized SparseTensor will be: --- --- index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] --- shape = [2 50] -deserializeManySparse :: (TensorType dtype) => Tensor v1 ByteString -> (Tensor Value Int64, Tensor Value dtype, Tensor Value Int64) - --- | Extracts crops from the input image tensor and bilinearly resizes them --- (possibly --- --- with aspect ratio change) to a common output size specified by --- crop_size. This is more general than the --- crop_to_bounding_box op which extracts a fixed size slice --- from the input image and does not allow resizing or aspect ratio --- change. --- --- Returns a tensor with crops from the input image at --- positions defined at the bounding box locations in boxes. The --- cropped boxes are all resized (with bilinear interpolation) to a fixed --- `size = [crop_height, crop_width]`. The result is a 4-D tensor --- `[num_boxes, crop_height, crop_width, depth]`. -cropAndResize :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Float -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value Float - --- | Applies sparse updates to a variable reference. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] = updates[...] 
---
--- # Vector indices (for each i) ref[indices[i], ...] = updates[i, ...]
---
--- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...]
--- = updates[i, ..., j, ...]
---
--- This operation outputs ref after the update is done. This
--- makes it easier to chain operations that need to use the reset value.
---
--- If values in ref are to be updated more than once, because
--- there are duplicate entries in indices, the order at which
--- the updates happen for each value is undefined.
---
--- Requires `updates.shape = indices.shape + ref.shape[1:]`.
---
--- (figure: images/ScatterUpdate.png)
-scatterUpdate :: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t)
- 
--- | Outputs random values from the Gamma distribution(s) described by
--- alpha.
---
--- This op uses the algorithm by Marsaglia et al. to acquire samples via
--- transformation-rejection from pairs of uniform and normal random
--- variables. See http://dl.acm.org/citation.cfm?id=358414
-randomGamma :: (TensorType s, OneOf '[Int32, Int64] s, TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 s -> Tensor v2 t -> Build (Tensor Value t)
-batchMatrixSolve :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
-batchMatrixBandPart :: (TensorType t) => Tensor v1 t -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor Value t
-tensorArrayClose :: Tensor Ref ByteString -> Build (ControlNode)
- 
--- | Computes the "logical and" of elements across dimensions of a tensor.
---
--- Reduces input along the dimensions given in
--- reduction_indices. Unless keep_dims is true, the
--- rank of the tensor is reduced by 1 for each entry in
--- reduction_indices. If keep_dims is true, the reduced
--- dimensions are retained with length 1.
-all :: (TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 Bool -> Tensor v2 tidx -> Tensor Value Bool
- 
--- | Returns the number of records this Reader has produced.
---
--- This is the same as the number of ReaderRead executions that have
--- succeeded.
-readerNumRecordsProduced :: Tensor Ref ByteString -> Build (Tensor Value Int64)
- 
--- | Pop the element at the top of the stack.
-stackPop :: (TensorType elem_type) => Tensor Ref ByteString -> Build (Tensor Value elem_type)
- 
--- | Scatter the data from the input value into specific TensorArray
--- elements.
---
--- indices must be a vector, its length must match the first dim
--- of value.
-tensorArrayScatterV2 :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 Float -> Tensor Value Float
- 
--- | Converts one or more images from RGB to HSV.
---
--- Outputs a tensor of the same shape as the images tensor,
--- containing the HSV value of the pixels. The output is only well
--- defined if the value in images are in `[0,1]`.
---
--- `output[..., 0]` contains hue, `output[..., 1]` contains saturation,
--- and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A
--- hue of 0 corresponds to pure red, hue 1/3 is pure green, and 2/3
--- is pure blue.
-rGBToHSV :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
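A hedged sketch of the scatterUpdate entry above. Besides the README-style session API assumed earlier, this also assumes TF.initializedVariable from TensorFlow.Ops and that the session runs pending variable initializers when the graph is extended, which is the behaviour I understand these bindings to have:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (scatterUpdate)
import qualified TensorFlow.Ops as TF (initializedVariable, vector)

main :: IO ()
main = TF.runSession $ do
    -- A mutable variable holding [10,20,30,40].
    v <- TF.build $ TF.initializedVariable (TF.vector [10, 20, 30, 40 :: Float])
    let indices = TF.vector [1, 3 :: Int32]
        updates = TF.vector [99, 77 :: Float]
    updated <- TF.build $ TF.scatterUpdate v indices updates
    -- Fetching the op's output forces the in-place update to run.
    result <- TF.run updated
    liftIO $ print (result :: V.Vector Float)  -- expected: [10,99,30,77]
```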
- 
--- | Serialize an N-minibatch SparseTensor into an `[N,
--- 3]` string Tensor.
---
--- The SparseTensor must have rank R greater than 1,
--- and the first dimension is treated as the minibatch dimension.
--- Elements of the SparseTensor must be sorted in increasing
--- order of this first dimension. The serialized SparseTensor
--- objects going into each row of serialized_sparse will have
--- rank `R-1`.
---
--- The minibatch size N is extracted from `sparse_shape[0]`.
-serializeManySparse :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor Value ByteString
- 
--- | Initializes a table from a text file.
---
--- It inserts one key-value pair into the table for each line of the
--- file. The key and value are extracted from the whole line content,
--- elements from the split line based on delimiter or the line
--- number (starting from zero). Where to extract the key and value from a
--- line is specified by key_index and value_index.
      ---
    • A value of -1 means use the line number(starting from zero), --- expects int64.
    • ---
    • A value of -2 means use the whole line content, expects --- string.
    • ---
    • A value >= 0 means use the index (starting at zero) of the --- split line based on delimiter.
    • ---
    -initializeTableFromTextFile :: Int64 -> Int64 -> Tensor Ref ByteString -> Tensor v2 ByteString -> Build (ControlNode) - --- | Decode a PNG-encoded image to a uint8 or uint16 tensor. --- --- The attr channels indicates the desired number of color --- channels for the decoded image. --- --- Accepted values are: --- ---
      ---
    • 0: Use the number of channels in the PNG-encoded image.
    • ---
    • 1: output a grayscale image.
    • ---
    • 3: output an RGB image.
    • ---
    • 4: output an RGBA image.
    • ---
--- If needed, the PNG-encoded image is transformed to match the requested
--- number of color channels.
-decodePng :: (TensorType dtype, OneOf '[Word16, Word8] dtype) => Tensor v1 ByteString -> Tensor Value dtype
- 
--- | Get the current size of the TensorArray.
-tensorArraySizeV2 :: Tensor v1 ByteString -> Tensor v2 Float -> Tensor Value Int32
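The decodePng entry above composes naturally with the readFile op that appears later in this hunk. A hedged sketch under the same assumed session API; "input.png" is a hypothetical path standing in for any small PNG on disk:

```haskell
import Control.Monad.IO.Class (liftIO)
import Data.Word (Word8)
import qualified Data.ByteString.Char8 as B8
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (decodePng, readFile)
import qualified TensorFlow.Ops as TF (scalar)

main :: IO ()
main = TF.runSession $ do
    -- Read the raw bytes in-graph, then decode; dtype Word8 is picked by
    -- the fetch annotation below.
    let path  = TF.scalar (B8.pack "input.png")  -- hypothetical file name
        image = TF.decodePng (TF.readFile path)
    pixels <- TF.run image
    -- One Word8 per channel value: height * width * channels in total.
    liftIO $ print (V.length (pixels :: V.Vector Word8))
```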
      ---
    • NOTE*: Div supports broadcasting. More about broadcasting +--
    • NOTE*: Sub supports broadcasting. More about broadcasting -- here
    • --
    -div :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Generates labels for candidate sampling with a log-uniform --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -logUniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) - --- | Defines a barrier that persists across different graph executions. --- --- A barrier represents a key-value map, where each key is a string, and --- each value is a tuple of tensors. --- --- At runtime, the barrier contains complete and --- incomplete elements. A complete element has defined tensors --- for all components of its value tuple, and may be accessed using --- BarrierTakeMany. An incomplete element has some undefined components --- in its value tuple, and may be updated using BarrierInsertMany. -barrier :: Build (Tensor Ref ByteString) - --- | Creates a variable resource. -createVariableOp :: (TensorType dtype) => ResourceHandle dtype -> Tensor v2 dtype -> Build (ControlNode) - --- | Applies a gradient to a given accumulator. Does not add if local_step --- is lesser --- --- than the accumulator's global_step. -accumulatorApplyGradient :: (TensorType dtype, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v2 Int64 -> Tensor v3 dtype -> Build (ControlNode) - --- | Outputs random values from a normal distribution. --- --- The generated values will have mean 0 and standard deviation 1. -randomStandardNormal :: (TensorType dtype, OneOf '[Word16, Double, Float] dtype, TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Build (Tensor Value dtype) - --- | Outputs random values from a normal distribution. The parameters may --- each be a --- --- scalar which applies to the entire output, or a vector of length --- shape[0] which stores the parameters for each batch. -parameterizedTruncatedNormal :: (TensorType dtype, OneOf '[Word16, Double, Float] dtype, TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Tensor v2 dtype -> Tensor v3 dtype -> Tensor v4 dtype -> Tensor v5 dtype -> Build (Tensor Value dtype) - --- | Updates the accumulator with a new value for global_step. Logs warning --- if the --- --- accumulator's value is already higher than new_global_step. -accumulatorSetGlobalStep :: Tensor Ref ByteString -> Tensor v2 Int64 -> Build (ControlNode) - --- | Resize images to size using bilinear interpolation. --- --- Input images can be of different types but output images are always --- float. -resizeBilinear :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value Float - --- | Quantize the input tensor of type float to output --- tensor of type T. --- ---
      ---
    • min_range, max_range are scalar floats that specify the --- range for the input data. The mode attribute --- controls exactly which calculations are used to convert the float --- values to their quantized equivalents.
    • ---
    --- --- In MIN_COMBINED mode, each value of the tensor will undergo --- the following: --- --- ``` out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) --- if T == qint8, out[i] -= (range(T) + 1) / 2.0 ``` here `range(T) = --- numeric_limitsT::max() - numeric_limitsT::min()` --- ---
      ---
    • MIN_COMBINED Mode Example*
    • ---
---
--- Assume the input is type float and has a possible range of [0.0, 6.0]
--- and the output type is quint8 ([0, 255]). The min_range and max_range
--- values should be specified as 0.0 and 6.0. Quantizing from float to
--- quint8 will multiply each value of the input by 255/6 and cast to
--- quint8.
---
--- If the output type was qint8 ([-128, 127]), the operation will
--- additionally subtract each value by 128 prior to casting, so that the
--- range of values aligns with the range of qint8.
---
--- If the mode is MIN_FIRST, then this approach is used:
---
--- ```
--- number_of_steps = 1 << (# of bits in T)
--- range_adjust = number_of_steps / (number_of_steps - 1)
--- range = (range_max - range_min) * range_adjust
--- range_scale = number_of_steps / range
--- quantized = round(input * range_scale) - round(range_min * range_scale)
---             + numeric_limits<T>::min()
--- quantized = max(quantized, numeric_limits<T>::min())
--- quantized = min(quantized, numeric_limits<T>::max())
--- ```
---
--- The biggest difference between this and MIN_COMBINED is that the
--- minimum range is rounded first, before it's subtracted from the
--- rounded value. With MIN_COMBINED, a small bias is introduced where
--- repeated iterations of quantizing and dequantizing will introduce a
--- larger and larger error.
---
--- One thing to watch out for is that the operator may choose to adjust
--- the requested minimum and maximum values slightly during the
--- quantization process, so you should always use the output ports as the
--- range for further calculations. For example, if the requested minimum
--- and maximum values are close to equal, they will be separated by a
--- small epsilon value to prevent ill-formed quantized buffers from being
--- created. Otherwise, you can end up with buffers where all the
--- quantized values map to the same float value, which causes problems
--- for operations that have to perform further calculations on them.
-quantizeV2 :: (TensorType t, OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v1 Float -> Tensor v2 Float -> Tensor v3 Float -> (Tensor Value t, Tensor Value Float, Tensor Value Float)
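To make the MIN_COMBINED arithmetic above concrete, here is a tiny pure-Haskell sketch of the quint8 formula. It is arithmetic only, not part of the bindings; the function name is invented for illustration:

```haskell
import Data.Word (Word8)

-- Mirrors the MIN_COMBINED formula above, specialized to quint8:
-- out = (in - min_range) * 255 / (max_range - min_range), rounded.
minCombinedQuint8 :: Float -> Float -> Float -> Word8
minCombinedQuint8 minRange maxRange x =
    round ((x - minRange) * 255 / (maxRange - minRange))

main :: IO ()
main =
    -- With the doc's example range [0.0, 6.0], each value is scaled by 255/6.
    print (map (minCombinedQuint8 0 6) [0, 1.5, 3, 6])  -- [0,64,128,255]
```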
      ---
    • 0: Use the number of channels in the JPEG-encoded image.
    • ---
    • 1: output a grayscale image.
    • ---
    • 3: output an RGB image.
    • ---
---
--- If needed, the JPEG-encoded image is transformed to match the
--- requested number of color channels.
---
--- The attr ratio allows downscaling the image by an integer
--- factor during decoding. Allowed values are: 1, 2, 4, and 8. This is
--- much faster than downscaling the image later.
-decodeJpeg :: Tensor v1 ByteString -> Tensor Value Word8
- 
--- | Computes the power of one value to another.
---
--- Given a tensor x and a tensor y, this operation
--- computes \(x^y\) for corresponding elements in x and
--- y. For example:
---
--- ```
--- # tensor x is [[2, 2], [3, 3]]
--- # tensor y is [[8, 16], [2, 3]]
--- tf.pow(x, y) ==> [[256, 65536], [9, 27]]
--- ```
-pow :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t
- 
--- | Forwards the input to the output.
---
--- This operator represents the loop termination condition used by the
--- "pivot" switches of a loop.
-loopCond :: Tensor v1 Bool -> Tensor Value Bool
- 
--- | Reads and outputs the entire contents of the input filename.
-readFile :: Tensor v1 ByteString -> Tensor Value ByteString
- 
--- | Returns the imaginary part of a complex number.
---
--- Given a tensor input of complex numbers, this operation
--- returns a tensor of type float that is the imaginary part of
--- each element in input. All elements in input must be
--- complex numbers of the form \(a + bj\), where *a* is the real part and
--- *b* is the imaginary part returned by this operation.
---
--- For example:
---
--- ```
--- # tensor input is [-2.25 + 4.75j, 3.25 + 5.75j]
--- tf.imag(input) ==> [4.75, 5.75]
--- ```
-imag :: (TensorType t, OneOf '[Complex Double, Complex Float] t, TensorType tout, OneOf '[Double, Float] tout) => Tensor v1 t -> Tensor Value tout
-tensorArrayGrad :: Tensor v1 ByteString -> Tensor v2 Float -> Build (Tensor Ref ByteString)
- 
--- | Outputs a Summary protocol buffer with a histogram.
---
--- The generated `Summary` has one summary value containing a
--- histogram for values.
---
--- This op reports an InvalidArgument error if any value is not
--- finite.
-histogramSummary :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor Value ByteString
- 
--- | Computes the gradients of 3-D convolution with respect to the input.
-conv3DBackpropInputV2 :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int32 -> Tensor v2 t -> Tensor v3 t -> Tensor Value t
- 
--- | Computes the gradient of bilinear interpolation.
-resizeBilinearGrad :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 Float -> Tensor v2 t -> Tensor Value t
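The pow entry above has a prettyprint example in its comment; a hedged Haskell rendering of the same values, under the session-API assumptions stated earlier:

```haskell
import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.GenOps.Core as TF (pow)
import qualified TensorFlow.Ops as TF (vector)

main :: IO ()
main = TF.runSession $ do
    -- The doc's example, element-wise: 2^8 = 256 and 3^2 = 9.
    let x = TF.vector [2, 3 :: Float]
        y = TF.vector [8, 2 :: Float]
    result <- TF.run (TF.pow x y)
    liftIO $ print (result :: V.Vector Float)  -- expected: [256.0,9.0]
```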
- 
--- | Add an N-minibatch SparseTensor to a
--- SparseTensorsMap, return N handles.
---
--- A SparseTensor of rank R is represented by three
--- tensors: sparse_indices, sparse_values, and
--- sparse_shape, where
---
--- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
---
--- An N-minibatch of SparseTensor objects is
--- represented as a SparseTensor having a first
--- sparse_indices column taking values between `[0, N)`, where
--- the minibatch size `N == sparse_shape[0]`.
---
--- The input SparseTensor must have rank R greater than
--- 1, and the first dimension is treated as the minibatch dimension.
--- Elements of the SparseTensor must be sorted in increasing
--- order of this first dimension. The stored SparseTensor
--- objects pointed to by each row of the output sparse_handles
--- will have rank `R-1`.
---
--- The SparseTensor values can then be read out as part of a
--- minibatch by passing the given keys as vector elements to
--- TakeManySparseFromTensorsMap. To ensure the correct
--- SparseTensorsMap is accessed, ensure that the same
--- container and shared_name are passed to that Op. If
--- no shared_name is provided here, instead use the *name* of
--- the Operation created by calling AddManySparseToTensorsMap as
--- the shared_name passed to
--- TakeManySparseFromTensorsMap. Ensure the Operations are
--- colocated.
-addManySparseToTensorsMap :: (TensorType t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Build (Tensor Value Int64)
-batchIFFT :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float)
-batchMatrixDeterminant :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Delete the tensor specified by its handle in the session.
-deleteSessionTensor :: Tensor v1 ByteString -> ControlNode
- 
--- | Computes the number of elements in the given table.
-lookupTableSize :: Tensor Ref ByteString -> Build (Tensor Value Int64)
- 
--- | Computes rectified linear: `max(features, 0)`.
-relu :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t
- 
--- | Interleave the values from the `data` tensors into a single tensor.
---
--- Builds a merged tensor such that
---
--- ```python
--- merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
--- ```
---
--- For example, if each `indices[m]` is scalar or vector, we have
---
--- ```python
--- # Scalar indices:
--- merged[indices[m], ...] = data[m][...]
---
--- # Vector indices:
--- merged[indices[m][i], ...] = data[m][i, ...]
--- ```
---
--- Each `data[i].shape` must start with the corresponding
--- `indices[i].shape`, and the rest of `data[i].shape` must be constant
--- w.r.t. i. That is, we must have `data[i].shape =
--- indices[i].shape + constant`. In terms of this constant, the
--- output shape is
---
--- merged.shape = [max(indices)] + constant
---
--- Values are merged in order, so if an index appears in both
--- `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice
--- `data[n][j]` will appear in the merged result.
---
--- For example:
---
--- ```python
--- indices[0] = 6
--- indices[1] = [4, 1]
--- indices[2] = [[5, 2], [0, 3]]
--- data[0] = [61, 62]
--- data[1] = [[41, 42], [11, 12]]
--- data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
--- merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], [51, 52], [61, 62]]
--- ```
---
--- (figure: images/DynamicStitch.png)
-dynamicStitch :: (TensorType t) => [Tensor v1 Int32] -> [Tensor v2 t] -> Tensor Value t
- 
--- | Looks up keys in a table, outputs the corresponding values.
---
--- The tensor keys must be of the same type as the keys of the
--- table. The output values is of the type of the table values.
---
--- The scalar default_value is the value output for keys not
--- present in the table. It must also be of the same type as the table
--- values.
-lookupTableFind :: (TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v2 tin -> Tensor v3 tout -> Build (Tensor Value tout)
- 
--- | Generate a single randomly distorted bounding box for an image.
--- --- Bounding box annotations are often supplied in addition to --- ground-truth labels in image recognition or object localization tasks. --- A common technique for training such a system is to randomly distort --- an image while preserving its content, i.e. *data augmentation*. This --- Op outputs a randomly distorted localization of an object, i.e. --- bounding box, given an image_size, bounding_boxes --- and a series of constraints. --- --- The output of this Op is a single bounding box that may be used to --- crop the original image. The output is returned as 3 tensors: --- begin, size and bboxes. The first 2 tensors --- can be fed directly into `tf.slice` to crop the image. The latter may --- be supplied to `tf.image.draw_bounding_boxes` to visualize what the --- bounding box looks like. --- --- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, --- x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` --- relative to the width and height of the underlying image. --- --- For example, --- --- ```python # Generate a single distorted bounding box. begin, size, --- bbox_for_draw = tf.image.sample_distorted_bounding_box( --- tf.shape(image), bounding_boxes=bounding_boxes) --- --- # Draw the bounding box in an image summary. image_with_box = --- tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) --- tf.image_summary(images_with_box, image_with_box) --- --- # Employ the bounding box to distort the image. distorted_image = --- tf.slice(image, begin, size) ``` --- --- Note that if no bounding box information is available, setting --- `use_image_if_no_bounding_boxes = true` will assume there is a single --- implicit bounding box covering the whole image. If --- use_image_if_no_bounding_boxes is false and no bounding boxes --- are supplied, an error is raised. -sampleDistortedBoundingBox :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word8] t) => Tensor v1 t -> Tensor v2 Float -> Build ((Tensor Value t, Tensor Value t, Tensor Value Float)) - --- | Splits a tensor into num_split tensors along one dimension. -splitV :: (TensorType t, TensorType tlen, OneOf '[Int32, Int64] tlen) => Int64 -> Tensor v1 t -> Tensor v2 tlen -> Tensor v3 Int32 -> [Tensor Value t] - --- | Performs a padding as a preprocess during a convolution. --- --- Similar to FusedResizeAndPadConv2d, this op allows for an optimized --- implementation where the spatial padding transformation stage is fused --- with the im2col lookup, but in this case without the bilinear --- filtering required for resizing. Fusing the padding prevents the need --- to write out the intermediate results as whole tensors, reducing --- memory pressure, and we can get some latency gains by merging the --- transformation calculations. The data_format attribute for Conv2D --- isn't supported by this op, and NHWC order is used instead. --- Internally this op uses a single per-graph scratch buffer, which means --- that it will block if multiple versions are being run in parallel. --- This is because this operator is primarily an optimization to minimize --- memory usage. -fusedPadConv2D :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor v3 t -> Tensor Value t - --- | For each key, assigns the respective value to the specified component. --- --- If a key is not found in the barrier, this operation will create a new --- incomplete element. 
If a key is found in the barrier, and the element --- already has a value at component_index, this operation will fail with --- INVALID_ARGUMENT, and leave the barrier in an undefined state. -barrierInsertMany :: (TensorType t) => Int64 -> Tensor Ref ByteString -> Tensor v2 ByteString -> Tensor v3 t -> Build (ControlNode) - --- | Raise a exception to abort the process when called. --- --- Returns nothing but an exception. -abort :: ControlNode - --- | Performs max pooling on the input and outputs both max values and --- indices. --- --- The indices in argmax are flattened, so that a maximum value --- at position `[b, y, x, c]` becomes flattened index `((b * height + y) --- * width + x) * channels + c`. -maxPoolWithArgmax :: (TensorType targmax, OneOf '[Int32, Int64] targmax, TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value targmax) - --- | Creates or finds a child frame, and makes `data` available to the --- child frame. --- --- The unique frame_name is used by the Executor to --- identify frames. If is_constant is true, output is a --- constant in the child frame; otherwise it may be changed in the child --- frame. At most parallel_iterations iterations are run in --- parallel in the child frame. -refEnter :: (TensorType t) => Tensor Ref t -> Build (Tensor Ref t) - --- | Dequantize the input tensor into a float Tensor. --- ---
    • min_range, max_range are scalar floats that specify the --- range for the input data. The mode attribute --- controls exactly which calculations are used to convert the float --- values to their quantized equivalents.
    --- --- In MIN_COMBINED mode, each value of the tensor will undergo --- the following: --- --- ``` if T == qint8, in[i] += (range(T) + 1)/ 2.0 out[i] = min_range + --- (in[i] * (max_range - min_range) / range(T)) ``` here `range(T) = --- numeric_limits<T>::max() - numeric_limits<T>::min()` --- ---
    *MIN_COMBINED Mode Example*
    --- --- If the input comes from a QuantizedRelu6, the output type is quint8 --- (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The --- min_range and max_range values are therefore 0.0 and 6.0. Dequantize --- on quint8 will take each value, cast to float, and multiply by 6 / --- 255. Note that if quantizedtype is qint8, the operation will --- additionally add each value by 128 prior to casting. --- --- If the mode is MIN_FIRST, then this approach is used: --- --- ``` number_of_steps = 1 << (# of bits in T) range_adjust = --- number_of_steps / (number_of_steps - 1) range = (range_max - --- range_min) * range_adjust range_scale = range / number_of_steps const --- double offset_input = static_castdouble(input) - --- lowest_quantized; result = range_min + ((input - --- numeric_limitsT::min()) * range_scale) ``` -dequantize :: (TensorType t, OneOf '[Int16, Int32, Word16, Word8] t) => Tensor v1 t -> Tensor v2 Float -> Tensor v3 Float -> Tensor Value Float - --- | Draw bounding boxes on a batch of images. --- --- Outputs a copy of images but draws on top of the pixels zero --- or more bounding boxes specified by the locations in boxes. --- The coordinates of the each bounding box in boxes are encoded --- as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are --- floats in `[0.0, 1.0]` relative to the width and height of the --- underlying image. --- --- For example, if an image is 100 x 200 pixels and the bounding box is --- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of --- the bounding box will be `(10, 40)` to `(50, 180)`. --- --- Parts of the bounding box may fall outside the image. -drawBoundingBoxes :: (TensorType t, OneOf '[Word16, Float] t) => Tensor v1 t -> Tensor v2 Float -> Tensor Value t -tensorArraySplit :: (TensorType t) => Tensor Ref ByteString -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Float -> Build (Tensor Value Float) - --- | Converts each string in the input Tensor to its hash mod by a number --- of buckets. --- --- The hash function is deterministic on the content of the string within --- the process and will never change. However, it is not suitable for --- cryptography. This function may be used when CPU time is scarce and --- inputs are trusted or unimportant. There is a risk of adversaries --- constructing inputs that all hash to the same bucket. To prevent this --- problem, use a strong hash function with --- `tf.string_to_hash_bucket_strong`. -stringToHashBucketFast :: Int64 -> Tensor v1 ByteString -> Tensor Value Int64 -tensorArrayScatter :: (TensorType t) => Tensor Ref ByteString -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 Float -> Build (Tensor Value Float) - --- | Returns a one-hot tensor. --- --- The locations represented by indices in indices take value --- on_value, while all other locations take value --- off_value. --- --- If the input indices is rank N, the output will have --- rank `N+1`, The new axis is created at dimension axis --- (default: the new axis is appended at the end). --- --- If indices is a scalar the output shape will be a vector of --- length depth. 
--- --- If indices is a vector of length features, the --- output shape will be: ``` features x depth if axis == -1 depth x --- features if axis == 0 ``` --- --- If indices is a matrix (batch) with shape `[batch, --- features]`, the output shape will be: ``` batch x features x depth if --- axis == -1 batch x depth x features if axis == 1 depth x batch x --- features if axis == 0 ``` --- --- Examples ========= --- --- Suppose that --- --- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 --- axis = -1 ``` --- --- Then output is `[4 x 3]`: --- --- ```output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) --- [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ``` --- --- Suppose that --- --- ``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 --- axis = 0 ``` --- --- Then output is `[3 x 4]`: --- --- ```output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 --- 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ --- one_hot(1) ``` Suppose that --- --- ``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = --- 0.0 axis = -1 ``` --- --- Then output is `[2 x 2 x 3]`: --- --- ```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // --- one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // --- one_hot(-1) ]``` -oneHot :: (TensorType t, TensorType tI, OneOf '[Int32, Int64, Word8] tI) => Tensor v1 tI -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 t -> Tensor Value t -batchIFFT3D :: Tensor v1 (Complex Float) -> Tensor Value (Complex Float) - --- | Reinterpret the bytes of a string as a vector of numbers. -decodeRaw :: (TensorType out_type, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] out_type) => Tensor v1 ByteString -> Tensor Value out_type -tensorArrayPack :: (TensorType dtype) => Tensor Ref ByteString -> Tensor v2 Float -> Build (Tensor Value dtype) - --- | Update '*var' and '*accum' according to FOBOS with Adagrad learning --- rate. --- --- accum += grad * grad prox_v = var - lr * grad * (1 / sqrt(accum)) var --- = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} -applyProximalAdagrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor v3 t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Build (Tensor Ref t) - --- | Applies a sparse gradient to a given accumulator. Does not add if --- local_step is --- --- lesser than the accumulator's global_step. -sparseAccumulatorApplyGradient :: (TensorType dtype, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Bool -> Tensor Ref ByteString -> Tensor v2 Int64 -> Tensor v3 Int64 -> Tensor v4 dtype -> Tensor v5 Int64 -> Build (ControlNode) - --- | Returns x + y element-wise. --- ---
    *NOTE*: Add supports broadcasting. AddN does not. --- More about broadcasting here
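    --- --- A minimal usage sketch (illustrative only; it assumes the vector --- and scalar helpers from TensorFlow.Ops and the session API from --- TensorFlow.Core, which sit outside this generated listing):
    ---
    --- ```haskell
    --- import Data.Vector (Vector)
    --- import qualified TensorFlow.Core as TF
    --- import qualified TensorFlow.Ops as TF
    ---
    --- -- Adds a scalar to a vector; broadcasting stretches the scalar.
    --- addDemo :: IO (Vector Float)
    --- addDemo = TF.runSession $
    ---   TF.run (TF.vector [1, 2, 3 :: Float] `TF.add` TF.scalar (10 :: Float))
    ---   -- expected: [11.0,12.0,13.0]
    --- ```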
    -add :: (TensorType t, OneOf '[Complex Double, Complex Float, ByteString, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes softsign: `features / (abs(features) + 1)`. -softsign :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t -tensorArrayRead :: (TensorType dtype) => Tensor Ref ByteString -> Tensor v2 Int32 -> Tensor v3 Float -> Build (Tensor Value dtype) - --- | Applies sparse subtraction between updates and individual --- values or slices --- --- within a given variable according to indices. --- --- ref is a Tensor with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of ref. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` --- --- For example, say we want to subtract 4 scattered elements from a --- rank-1 tensor with 8 elements. In Python, that subtraction would look --- like this: --- --- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = --- tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, --- 12]) sub = tf.scatter_nd_sub(ref, indices, updates) with tf.Session() --- as sess: print sess.run(sub) --- --- The resulting update to ref would look like this: --- ---
    [1, -9, 3, -6, -4, 6, 7, -4]
    --- --- See tf.scatter_nd for more details about how to make updates to --- slices. -scatterNdSub :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t) - --- | Restores a tensor from checkpoint files. --- --- This is like Restore except that restored tensor can be --- listed as filling only a slice of a larger tensor. --- shape_and_slice specifies the shape of the larger tensor and --- the slice that the restored tensor covers. --- --- The shape_and_slice input has the same format as the elements --- of the shapes_and_slices input of the SaveSlices op. -restoreSlice :: (TensorType dt) => Tensor v1 ByteString -> Tensor v2 ByteString -> Tensor v3 ByteString -> Tensor Value dt - --- | Update ref by adding value to it. --- --- This operation outputs "ref" after the update is done. This makes it --- easier to chain operations that need to use the reset value. -assignAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor v2 t -> Build (Tensor Ref t) - --- | Returns the truth value of (x > y) element-wise. --- ---
    *NOTE*: Greater supports broadcasting. More about --- broadcasting here
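    --- --- A short comparison sketch (illustrative; it assumes TensorFlow.Ops --- helpers, the TensorFlow.Core session API, and that this binding can --- fetch a Vector of Bool):
    ---
    --- ```haskell
    --- import Data.Vector (Vector)
    --- import qualified TensorFlow.Core as TF
    --- import qualified TensorFlow.GenOps.Core as Ops
    --- import qualified TensorFlow.Ops as TF
    ---
    --- -- Element-wise x > y; a scalar y would broadcast the same way.
    --- greaterDemo :: IO (Vector Bool)
    --- greaterDemo = TF.runSession $
    ---   TF.run (TF.vector [1, 5, 3 :: Float] `Ops.greater` TF.vector [2, 2, 2 :: Float])
    ---   -- expected: [False,True,True]
    --- ```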
    -greater :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool - --- | Returns the number of work units this Reader has finished processing. -readerNumWorkUnitsCompleted :: Tensor Ref ByteString -> Build (Tensor Value Int64) - --- | Gather specific elements from the TensorArray into output --- value. --- --- All elements selected by indices must have the same shape. -tensorArrayGatherV2 :: (TensorType dtype) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 Float -> Tensor Value dtype - --- | Read an element from the TensorArray into output value. -tensorArrayReadV2 :: (TensorType dtype) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 Float -> Tensor Value dtype - --- | Decode web-safe base64-encoded strings. --- --- Input may or may not have padding at the end. See EncodeBase64 for --- padding. Web-safe means that input must use - and _ instead of + and --- /. -decodeBase64 :: Tensor v1 ByteString -> Tensor Value ByteString - --- | Push an element onto the tensor_array. -tensorArrayWriteV2 :: (TensorType t) => Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 Float -> Tensor Value Float - --- | Outputs a Summary protocol buffer with audio. --- --- The summary has up to max_outputs summary values containing --- audio. The audio is built from tensor which must be 3-D with --- shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, --- frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` --- with a sample rate of sample_rate. --- --- The tag argument is a scalar Tensor of type --- string. It is used to build the tag of the summary --- values: --- ---
    • If max_outputs is 1, the summary value tag is --- '*tag*/audio'.
    • If max_outputs is greater than 1, the summary value tags --- are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', --- etc.
    -audioSummary :: Float -> Tensor v1 ByteString -> Tensor v2 Float -> Tensor Value ByteString - --- | Returns which elements of x are finite. --- --- compatibility(numpy) Equivalent to np.isfinite --- end_compatibility -isFinite :: (TensorType t, OneOf '[Word16, Double, Float] t) => Tensor v1 t -> Tensor Value Bool -tensorArrayConcat :: (TensorType dtype) => Tensor Ref ByteString -> Tensor v2 Float -> Build ((Tensor Value dtype, Tensor Value Int64)) - --- | Computes the sum of elements across dimensions of a SparseTensor. --- --- This Op takes a SparseTensor and is the sparse counterpart to --- `tf.reduce_sum()`. In particular, this Op also returns a dense --- Tensor instead of a sparse one. --- --- Reduces sp_input along the dimensions given in --- reduction_axes. Unless keep_dims is true, the rank --- of the tensor is reduced by 1 for each entry in --- reduction_axes. If keep_dims is true, the reduced --- dimensions are retained with length 1. --- --- If reduction_axes has no entries, all dimensions are reduced, --- and a tensor with a single element is returned. Additionally, the axes --- can be negative, which are interpreted according to the indexing rules --- in Python. -sparseReduceSum :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int32 -> Tensor Value t - --- | Returns x / y element-wise for real types. --- --- If x and y are reals, this will return the --- floating-point division. --- ---
    *NOTE*: Div supports broadcasting. More about broadcasting --- here
    -realDiv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t -tensorArraySize :: Tensor Ref ByteString -> Tensor v2 Float -> Build (Tensor Value Int32) - --- | Adds bias to value. --- --- This is a deprecated version of BiasAdd and will be soon removed. --- --- This is a special case of `tf.add` where bias is restricted --- to be 1-D. Broadcasting is supported, so value may have any --- number of dimensions. -biasAddV1 :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns the truth value of x OR y element-wise. --- ---
    *NOTE*: LogicalOr supports broadcasting. More about --- broadcasting here
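    --- --- A small sketch combining comparisons with LogicalOr (illustrative; --- same assumptions as the sketches above):
    ---
    --- ```haskell
    --- import Data.Vector (Vector)
    --- import qualified TensorFlow.Core as TF
    --- import qualified TensorFlow.GenOps.Core as Ops
    --- import qualified TensorFlow.Ops as TF
    ---
    --- -- Flag values outside the open interval (0, 10); the scalar bounds
    --- -- broadcast against the vector.
    --- outOfRange :: IO (Vector Bool)
    --- outOfRange = TF.runSession $ do
    ---   let x = TF.vector [-3, 4, 12 :: Float]
    ---   TF.run ((x `Ops.less` TF.scalar 0) `Ops.logicalOr` (x `Ops.greater` TF.scalar 10))
    ---   -- expected: [True,False,True]
    --- ```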
    -logicalOr :: Tensor v1 Bool -> Tensor v2 Bool -> Tensor Value Bool - --- | Push an element onto the stack. -stackPush :: (TensorType t) => Tensor Ref ByteString -> Tensor v2 t -> Build (Tensor Value t) - --- | Computes Quantized Rectified Linear: `max(features, 0)` -quantizedRelu :: (TensorType tinput, OneOf '[Int16, Int32, Word16, Word8] tinput, TensorType out_type, OneOf '[Int16, Int32, Word16, Word8] out_type) => Tensor v1 tinput -> Tensor v2 Float -> Tensor v3 Float -> (Tensor Value out_type, Tensor Value Float, Tensor Value Float) - --- | Return the reduction indices for computing gradients of s0 op s1 with --- broadcast. --- --- This is typically used by gradient computations for a broadcasting --- operation. -broadcastGradientArgs :: (TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) - --- | Finds unique elements in a 1-D tensor. --- --- This operation returns a tensor y containing all of the --- unique elements of x sorted in the same order that they occur --- in x. This operation also returns a tensor idx the --- same size as x that contains the index of each value of --- x in the unique output y. Finally, it returns a --- third tensor count that contains the count of each element of --- y in x. In other words: --- --- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` --- --- For example: --- --- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, --- idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==> --- [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] ``` -uniqueWithCounts :: (TensorType t, TensorType out_idx, OneOf '[Int32, Int64] out_idx) => Tensor v1 t -> (Tensor Value t, Tensor Value out_idx, Tensor Value out_idx) - --- | Returns element-wise remainder of division. This emulates C semantics --- where --- --- true, this follows C semantics in that the result here is consistent --- with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. --- ---
    *NOTE*: Mod supports broadcasting. More about broadcasting --- here
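    --- --- A quick sketch of the truncating (C-style) remainder (illustrative; --- same assumptions as above); the sign follows the dividend, so a --- flooring mod would give 3 where this gives -2:
    ---
    --- ```haskell
    --- import Data.Int (Int32)
    --- import Data.Vector (Vector)
    --- import qualified TensorFlow.Core as TF
    --- import qualified TensorFlow.GenOps.Core as Ops
    --- import qualified TensorFlow.Ops as TF
    ---
    --- -- (-7) rem 5 is -2 under C semantics: truncate(-7/5) * 5 + (-2) = -7.
    --- truncModDemo :: IO (Vector Int32)
    --- truncModDemo = TF.runSession $
    ---   TF.run (TF.vector [-7, 7 :: Int32] `Ops.truncateMod` TF.vector [5, 5 :: Int32])
    ---   -- expected: [-2,2]
    --- ```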
    -truncateMod :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns the gradient of StridedSlice. --- --- Since StridedSlice cuts out pieces of its input --- which is size shape, its gradient will have the same shape --- (which is passed here as shape). The gradient will be zero in --- any element that the slice does not select. --- --- Arguments are the same as StridedSliceGrad with the exception that --- dy is the input gradient to be propagated and shape is --- the shape of StridedSlice's input. -stridedSliceGrad :: (TensorType t, TensorType index, OneOf '[Int32, Int64] index) => Tensor v1 index -> Tensor v2 index -> Tensor v3 index -> Tensor v4 index -> Tensor v5 t -> Tensor Value t - --- | Performs fractional average pooling on the input. --- --- Fractional average pooling is similar to Fractional max pooling in the --- pooling region generation step. The only difference is that after --- pooling regions are generated, a mean operation is performed instead --- of a max operation in each pooling region. -fractionalAvgPool :: (TensorType t, OneOf '[Int32, Int64, Double, Float] t) => Tensor v1 t -> (Tensor Value t, Tensor Value Int64, Tensor Value Int64) - --- | Extracts the average sparse gradient in the given --- SparseConditionalAccumulator, --- --- provided that sufficient (i.e., more than num_required) gradients have --- been accumulated. The op will blocks until sufficient gradients have --- been accumulated. If the accumulator has already aggregated more than --- num_required gradients, it will return its average of the accumulated --- gradients. Also automatically increments the recorded global_step in --- the accumulator by 1, and resets the aggregate to 0. -sparseAccumulatorTakeGradient :: (TensorType dtype, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] dtype) => Tensor Ref ByteString -> Tensor v2 Int32 -> Build ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) - --- | Convert JSON-encoded Example records to binary protocol buffer --- strings. --- --- This op translates a tensor containing Example records, encoded using --- the standard JSON mapping, into a tensor containing the same --- records encoded as binary protocol buffers. The resulting tensor can --- then be fed to any of the other Example-parsing ops. -decodeJSONExample :: Tensor v1 ByteString -> Tensor Value ByteString - --- | A placeholder op that passes though input when its output is --- not fed. -placeholderWithDefault :: (TensorType dtype) => Shape -> Tensor v1 dtype -> Tensor Value dtype - --- | Update '*var' according to the Ftrl-proximal scheme. --- --- accum_new = accum + grad * grad linear += grad + --- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var quadratic = 1.0 --- / (accum_new^(lr_power) * lr) + 2 * l2 var = (sign(linear) * l1 - --- linear) / quadratic if |linear| > l1 else 0.0 accum = accum_new -applyFtrl :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor Ref t -> Tensor Ref t -> Tensor Ref t -> Tensor v4 t -> Tensor v5 t -> Tensor v6 t -> Tensor v7 t -> Tensor v8 t -> Build (Tensor Ref t) - --- | Applies L1 regularization shrink step on the parameters. -sdcaShrinkL1 :: Float -> Float -> [Tensor Ref Float] -> Build (ControlNode) - --- | Generate a sharded filename. The filename is printf formatted as --- --- %s-%05d-of-%05d, basename, shard, num_shards. 
-shardedFilename :: Tensor v1 ByteString -> Tensor v2 Int32 -> Tensor v3 Int32 -> Tensor Value ByteString - --- | Fake-quantize the inputs tensor, type float to --- outputs tensor of same type. --- --- Attributes [min; max] define the clamping range for the --- inputs data. Op divides this range into 255 steps (total of --- 256 values), then replaces each inputs value with the closest --- of the quantized step values. --- --- Quantization is called fake since the output is still in floating --- point. -fakeQuantWithMinMaxArgs :: Tensor v1 Float -> Tensor Value Float - --- | Applies sparse addition between updates and individual values --- or slices --- --- within a given variable according to indices. --- --- ref is a Tensor with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- ref. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of ref. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` --- --- For example, say we want to add 4 scattered elements to a rank-1 --- tensor to 8 elements. In Python, that addition would look like this: --- --- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = --- tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, --- 12]) add = tf.scatter_nd_add(ref, indices, updates) with tf.Session() --- as sess: print sess.run(add) --- --- The resulting update to ref would look like this: --- ---
    [1, 13, 3, 14, 14, 6, 7, 20]
    --- --- See tf.scatter_nd for more details about how to make updates to --- slices. -scatterNdAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t) - --- | Returns the number of gradients aggregated in the given accumulators. -accumulatorNumAccumulated :: Tensor Ref ByteString -> Build (Tensor Value Int32) - --- | Computes the sum along sparse segments of a tensor divided by the sqrt --- of N. --- --- N is the size of the segment being reduced. --- --- Read the section on Segmentation for an explanation of --- segments. -sparseSegmentSqrtN :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor Value t - --- | DepthToSpace for tensors of type T. --- --- Rearranges data from depth into blocks of spatial data. This is the --- reverse transformation of SpaceToDepth. More specifically, this op --- outputs a copy of the input tensor where values from the --- depth dimension are moved in spatial blocks to the --- height and width dimensions. The attr --- block_size indicates the input block size and how the data is --- moved. --- ---
    • Chunks of data of size `block_size * block_size` from depth are --- rearranged into non-overlapping blocks of size `block_size x --- block_size`.
    • The width of the output tensor is `input_width * block_size`, whereas --- the height is `input_height * block_size`.
    • The depth of the input tensor must be divisible by `block_size * --- block_size`.
    --- --- That is, assuming the input is in the shape: `[batch, height, width, --- depth]`, the shape of the output will be: `[batch, height*block_size, --- width*block_size, depth/(block_size*block_size)]` --- --- This operation requires that the input tensor be of rank 4, and that --- block_size be >=1 and that `block_size * block_size` be a --- divisor of the input depth. --- --- This operation is useful for resizing the activations between --- convolutions (but keeping all data), e.g. instead of pooling. It is --- also useful for training purely convolutional models. --- --- For example, given this input of shape `[1, 1, 1, 4]`, and a block --- size of 2: --- --- ```prettyprint x = [[[[1, 2, 3, 4]]]] --- --- ``` --- --- This operation will output a tensor of shape `[1, 2, 2, 1]`: --- --- ```prettyprint [[[[1], [2]], [[3], [4]]]] ``` --- --- Here, the input has a batch of 1 and each batch element has shape `[1, --- 1, 4]`, the corresponding output will have 2x2 elements and will have --- a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output --- element shape is `[2, 2, 1]`. --- --- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, --- e.g. --- --- ```prettyprint x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ``` --- --- This operation, for block size of 2, will return the following tensor --- of shape `[1, 2, 2, 3]` --- --- ```prettyprint [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] --- --- ``` --- --- Similarly, for the following input of shape `[1 2 2 4]`, and a block --- size of 2: --- --- ```prettyprint x = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], --- [13, 14, 15, 16]]]] ``` --- --- the operator will return the following tensor of shape `[1 4 4 1]`: --- --- ```prettyprint x = [[ [1], [2], [5], [6]], [ [3], [4], [7], [8]], [ --- [9], [10], [13], [14]], [ [11], [12], [15], [16]]] --- --- ``` -depthToSpace :: (TensorType t) => Int64 -> Tensor v1 t -> Tensor Value t - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -allCandidateSampler :: Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) - --- | Computes the gradient of nearest neighbor interpolation. -resizeNearestNeighborGrad :: (TensorType t, OneOf '[Int32, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t - --- | Performs greedy decoding on the logits given in inputs. --- --- A note about the attribute merge_repeated: if enabled, when --- consecutive logits' maximum indices are the same, only the first of --- these is emitted. Labeling the blank *, the sequence "A B B * B --- B" becomes "A B" if merge_repeated = True and "A B B B B" if --- merge_repeated = False. --- --- Regardless of the value of merge_repeated, if the maximum index of a --- given time and batch corresponds to the blank, index `(num_classes - --- 1)`, no new element is emitted. -cTCGreedyDecoder :: Tensor v1 Float -> Tensor v2 Int32 -> (Tensor Value Int64, Tensor Value Int64, Tensor Value Int64, Tensor Value Float) - --- | L2 Loss. 
--- --- Computes half the L2 norm of a tensor without the sqrt: --- --- output = sum(t ** 2) / 2 -l2Loss :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Computes the maximum along segments of a tensor. --- --- Read the section on Segmentation for an explanation of --- segments. --- --- Computes a tensor such that \(output_i = max_j(data_j)\) where --- max is over j such that `segment_ids[j] == i`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/SegmentMax.png" alt /div -segmentMax :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 t -> Tensor v2 tindices -> Tensor Value t - --- | Increments ref until it reaches limit. -countUpTo :: (TensorType t, OneOf '[Int32, Int64] t) => Int64 -> Tensor Ref t -> Build (Tensor Value t) - --- | A Reader that outputs the records from a TensorFlow Records file. -tFRecordReader :: Build (Tensor Ref ByteString) - --- | Forwards `data` to the output port determined by pred. --- --- If pred is true, the `data` input is forwarded to --- output_true. Otherwise, the data goes to --- output_false. --- --- See also RefSwitch and Merge. -switch :: (TensorType t) => Tensor v1 t -> Tensor v2 Bool -> (Tensor Value t, Tensor Value t) - --- | Computes gradients for SparseSegmentMean. --- --- Returns tensor "output" with same shape as grad, except for dimension --- 0 whose value is output_dim0. -sparseSegmentMeanGrad :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor v3 Int32 -> Tensor v4 Int32 -> Tensor Value t - --- | Gather values or slices from params according to --- indices. --- --- params is a Tensor of rank P and indices is --- a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- params. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 --- < K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of params. --- --- Produces an output tensor with shape --- --- ``` [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. ``` --- --- Some examples below. 
--- --- Simple indexing into a matrix: --- --- ```python indices = [[0, 0], [1, 1]] params = [[a, --- b], [c, d]] output = [a, --- d] ``` --- --- Slice indexing into a matrix: --- --- ```python indices = [[1], [0]] params = [[a, b], --- [c, d]] output = [[c, d], --- [a, b]] ``` --- --- Indexing into a 3-tensor: --- --- ```python indices = [[1]] params = [[[a0, b0], --- [c0, d0]], [[a1, b1], --- [c1, d1]]] output = [[[a1, b1], --- [c1, d1]]] --- --- indices = [[0, 1], [1, 0]] params = [[[a0, b0], --- [c0, d0]], [[a1, b1], --- [c1, d1]]] output = [[c0, d0], --- [a1, b1]] --- --- indices = [[0, 0, 1], [1, 0, 1]] params = [[[a0, --- b0], [c0, d0]], [[a1, --- b1], [c1, d1]]] output = [b0, --- b1] ``` --- --- Batched indexing into a matrix: --- --- ```python indices = [[[0, 0]], [[0, 1]]] params = [[a, --- b], [c, d]] output = [[a], --- [b]] ``` --- --- Batched slice indexing into a matrix: --- --- ```python indices = [[[1]], [[0]]] params = [[a, b], --- [c, d]] output = [[[c, d]], --- [[a, b]]] ``` --- --- Batched indexing into a 3-tensor: --- --- ```python indices = [[[1]], [[0]]] params = [[[a0, --- b0], [c0, d0]], [[a1, --- b1], [c1, d1]]] output = [[[[a1, --- b1], [c1, d1]]], [[[a0, --- b0], [c0, d0]]]] --- --- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = --- [[[a0, b0], [c0, d0]], --- [[a1, b1], [c1, d1]]] output = --- [[[c0, d0], [a1, b1]], --- [[a0, b0], [c1, d1]]] --- --- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = --- [[[a0, b0], [c0, d0]], --- [[a1, b1], [c1, d1]]] output = --- [[b0, b1], [d0, c1]] ``` -gatherNd :: (TensorType tparams, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tparams -> Tensor v2 tindices -> Tensor Value tparams - --- | Removes dimensions of size 1 from the shape of a tensor. --- --- Given a tensor input, this operation returns a tensor of the --- same type with all dimensions of size 1 removed. If you don't want to --- remove all size 1 dimensions, you can remove specific size 1 --- dimensions by specifying squeeze_dims. --- --- For example: --- --- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] --- shape(squeeze(t)) ==> [2, 3] ``` --- --- Or, to remove specific size 1 dimensions: --- --- ```prettyprint # t is a tensor of shape [1, 2, 1, 3, 1, 1] --- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] ``` -squeeze :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Outputs random values from a uniform distribution. --- --- The generated values follow a uniform distribution in the range `[0, --- 1)`. The lower bound 0 is included in the range, while the upper bound --- 1 is excluded. -randomUniform :: (TensorType dtype, OneOf '[Word16, Double, Float] dtype, TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 t -> Build (Tensor Value dtype) - --- | Returns up to num_records (key, value) pairs produced by a --- Reader. --- --- Will dequeue from the input queue if necessary (e.g. when the Reader --- needs to start reading from a new file since it has finished with the --- previous file). It may return less than num_records even --- before the last batch. -readerReadUpTo :: Tensor Ref ByteString -> Tensor Ref ByteString -> Tensor v3 Int64 -> Build ((Tensor Value ByteString, Tensor Value ByteString)) - --- | Computes the gradients of 3-D convolution with respect to the input. 
-conv3DBackpropInput :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor v3 t -> Tensor Value t - --- | Computes a 2-D depthwise convolution given 4-D input and --- filter tensors. --- --- Given an input tensor of shape `[batch, in_height, in_width, --- in_channels]` and a filter / kernel tensor of shape `[filter_height, --- filter_width, in_channels, channel_multiplier]`, containing --- in_channels convolutional filters of depth 1, --- depthwise_conv2d applies a different filter to each input --- channel (expanding from 1 channel to channel_multiplier --- channels for each), then concatenates the results together. Thus, the --- output has `in_channels * channel_multiplier` channels. --- --- for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, --- i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * --- i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] --- --- Must have `strides[0] = strides[3] = 1`. For the most common case of --- the same horizontal and vertices strides, `strides = [1, stride, --- stride, 1]`. -depthwiseConv2dNative :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Generates labels for candidate sampling with a learned unigram --- distribution. --- --- See explanations of candidate sampling and the data formats at --- go/candidate-sampling. --- --- For each batch, this op picks a single set of sampled candidate --- labels. --- --- The advantages of sampling candidates per-batch are simplicity and the --- possibility of efficient dense matrix multiplication. The disadvantage --- is that the sampled candidates must be chosen independently of the --- context and of the true labels. -learnedUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v1 Int64 -> (Tensor Value Int64, Tensor Value Float, Tensor Value Float) - --- | Table initializer that takes two tensors for keys and values --- respectively. -initializeTable :: (TensorType tkey, TensorType tval) => Tensor Ref ByteString -> Tensor v2 tkey -> Tensor v3 tval -> Build (ControlNode) - --- | Forwards the value of an available tensor from inputs to --- output. --- --- Merge waits for at least one of the tensors in --- inputs to become available. It is usually combined with --- Switch to implement branching. --- --- Merge forwards the first tensor for become available to --- output, and sets value_index to its index in --- inputs. -merge :: (TensorType t) => [Tensor v1 t] -> (Tensor Value t, Tensor Value Int32) - --- | Forwards the value of an available tensor from inputs to --- output. --- --- Merge waits for at least one of the tensors in --- inputs to become available. It is usually combined with --- Switch to implement branching. --- --- Merge forwards the first tensor for become available to --- output, and sets value_index to its index in --- inputs. -refMerge :: (TensorType t) => [Tensor Ref t] -> Build ((Tensor Ref t, Tensor Value Int32)) - --- | Rounds the values of a tensor to the nearest integer, element-wise. --- --- Rounds half to even. Also known as bankers rounding. If you want to --- round according to the current system rounding mode use std::cint. 
-round :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v1 t -> Tensor Value t -batchSelfAdjointEig :: (TensorType t, OneOf '[Double, Float] t) => Tensor v1 t -> Tensor Value t - --- | Partitions `data` into num_partitions tensors using indices --- from partitions. --- --- For each index tuple js of size `partitions.ndim`, the slice --- `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices --- with `partitions[js] = i` are placed in `outputs[i]` in lexicographic --- order of js, and the first dimension of `outputs[i]` is the --- number of entries in partitions equal to i. In --- detail, --- --- ```python outputs[i].shape = [sum(partitions == i)] + --- data.shape[partitions.ndim:] --- --- outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) ``` --- --- `data.shape` must start with `partitions.shape`. --- --- For example: --- --- ```python # Scalar partitions. partitions = 1 num_partitions = 2 data --- = [10, 20] outputs[0] = [] # Empty with shape [0, 2] outputs[1] = --- [[10, 20]] --- --- # Vector partitions. partitions = [0, 0, 1, 1, 0] num_partitions = 2 --- data = [10, 20, 30, 40, 50] outputs[0] = [10, 20, 50] outputs[1] = --- [30, 40] ``` --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/DynamicPartition.png" alt /div -dynamicPartition :: (TensorType t) => Int64 -> Tensor v1 t -> Tensor v2 Int32 -> [Tensor Value t] - --- | Reshapes a tensor. --- --- Given tensor, this operation returns a tensor that has the --- same values as tensor with shape shape. --- --- If one component of shape is the special value -1, the size of --- that dimension is computed so that the total size remains constant. In --- particular, a shape of `[-1]` flattens into 1-D. At most one --- component of shape can be -1. --- --- If shape is 1-D or higher, then the operation returns a tensor --- with shape shape filled with the values of tensor. In --- this case, the number of elements implied by shape must be the --- same as the number of elements in tensor. --- --- For example: --- --- ```prettyprint # tensor t is [1, 2, 3, 4, 5, 6, 7, 8, 9] # --- tensor t has shape [9] reshape(t, [3, 3]) ==> [[1, 2, 3], --- [4, 5, 6], [7, 8, 9]] --- --- # tensor t is [[[1, 1], [2, 2]], # [[3, 3], [4, 4]]] # tensor --- t has shape [2, 2, 2] reshape(t, [2, 4]) ==> [[1, 1, 2, --- 2], [3, 3, 4, 4]] --- --- # tensor t is [[[1, 1, 1], # [2, 2, 2]], # [[3, 3, 3], # [4, --- 4, 4]], # [[5, 5, 5], # [6, 6, 6]]] # tensor t has shape [3, --- 2, 3] # pass '[-1]' to flatten t reshape(t, [-1]) ==> [1, --- 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] --- --- # -1 can also be used to infer the shape --- --- # -1 is inferred to be 9: reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, --- 2, 3, 3, 3], [4, 4, 4, 5, 5, 5, 6, 6, 6]] # -1 is inferred to be 2: --- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], [4, 4, 4, 5, --- 5, 5, 6, 6, 6]] # -1 is inferred to be 3: reshape(t, [ 2, -1, 3]) --- ==> [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, --- 6, 6]]] --- --- # tensor t is [7] # shape `[]` reshapes to a scalar --- reshape(t, []) ==> 7 ``` -reshape :: (TensorType t, TensorType tshape, OneOf '[Int32, Int64] tshape) => Tensor v1 t -> Tensor v2 tshape -> Tensor Value t - --- | A Reader that outputs fixed-length records from a file. 
-fixedLengthRecordReader :: Int64 -> Build (Tensor Ref ByteString) - --- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA) --- optimizer for --- --- linear models with L1 + L2 regularization. As global optimization --- objective is strongly-convex, the optimizer optimizes the dual --- objective at each step. The optimizer applies each update one example --- at a time. Examples are sampled uniformly, and the optimizer is --- learning rate free and enjoys linear convergence rate. --- --- Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; --- Zhang, Tong. 2012 arXiv1211.2717S: --- http://arxiv.org/pdf/1211.2717v1.pdf --- --- Loss objective = sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w| --- --- Adding vs. Averaging in Distributed Primal-Dual Optimization. Chenxin --- Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, --- Martin Takac http://arxiv.org/abs/1502.03508 --- --- Stochastic Dual Coordinate Ascent with Adaptive Probabilities Dominik --- Csiba, Zheng Qu, Peter Richtarik --- https://arxiv.org/abs/1502.08053 -sdcaOptimizer :: Float -> Float -> Int64 -> Int64 -> [Tensor v1 Int64] -> [Tensor v2 Int64] -> [Tensor v3 Float] -> [Tensor v4 Float] -> Tensor v5 Float -> Tensor v6 Float -> [Tensor v7 Int64] -> [Tensor v8 Float] -> [Tensor v9 Float] -> Tensor v10 Float -> (Tensor Value Float, [Tensor Value Float], [Tensor Value Float]) - --- | Resize images to size using area interpolation. --- --- Input images can be of different types but output images are always --- float. -resizeArea :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value Float - --- | Generates values in an interval. --- --- A sequence of num evenly-spaced values are generated --- beginning at start. If `num > 1`, the values in the --- sequence increase by `stop - start / num - 1`, so that the last one is --- exactly stop. --- --- For example: --- --- ``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 --- 12.0] ``` -linSpace :: (TensorType t, OneOf '[Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 t -> Tensor v3 tidx -> Tensor Value t - --- | Calculates the CTC Loss (log probability) for each batch entry. Also --- calculates --- --- the gradient. This class performs the softmax operation for you, so --- inputs should be e.g. linear projections of outputs by an LSTM. -cTCLoss :: Tensor v1 Float -> Tensor v2 Int64 -> Tensor v3 Int32 -> Tensor v4 Int32 -> (Tensor Value Float, Tensor Value Float) - --- | Returns the batched diagonal part of a batched tensor. --- --- This operation returns a tensor with the diagonal part of the --- batched input. The diagonal part is computed as --- follows: --- --- Assume input has k dimensions `[I, J, K, ..., M, --- N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, --- J, K, ..., min(M, N)]` where: --- --- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. --- --- The input must be at least a matrix. --- --- For example: --- --- ```prettyprint # input is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, --- 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, --- 8]]] --- --- and input.shape = (2, 4, 4) --- --- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] --- --- which has shape (2, 4) ``` -matrixDiagPart :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Creates or finds a child frame, and makes `data` available to the --- child frame. 
--- --- This op is used together with Exit to create loops in the --- graph. The unique frame_name is used by the Executor --- to identify frames. If is_constant is true, output --- is a constant in the child frame; otherwise it may be changed in the --- child frame. At most parallel_iterations iterations are run --- in parallel in the child frame. -enter :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | PNG-encode an image. --- --- image is a 3-D uint8 or uint16 Tensor of shape `[height, --- width, channels]` where channels is: --- ---
    • 1: for grayscale.
    • 2: for grayscale + alpha.
    • 3: for RGB.
    • 4: for RGBA.
    --- --- The ZLIB compression level, compression, can be -1 for the --- PNG-encoder default or a value from 0 to 9. 9 is the highest --- compression level, generating the smallest output, but is slower. -encodePng :: (TensorType t, OneOf '[Word16, Word8] t) => Tensor v1 t -> Tensor Value ByteString - --- | Exits the current frame to its parent frame. --- --- Exit makes its input `data` available to the parent frame. -exit :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Creates a new tensor by applying sparse updates to individual --- --- values or slices within a zero tensor of the given shape tensor --- according to indices. This operator is the inverse of the --- tf.gather_nd operator which extracts values or slices from a --- given tensor. --- --- TODO(simister): Add a link to Variable.getitem documentation on --- slice syntax. --- --- shape is a TensorShape with rank P and --- indices is a Tensor of rank Q. --- --- indices must be integer tensor, containing indices into --- shape. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < --- K <= P`. --- --- The innermost dimension of indices (with length K) --- corresponds to indices into elements (if `K = P`) or slices (if `K --- < P`) along the Kth dimension of shape. --- --- updates is Tensor of rank `Q-1+P-K` with shape: --- --- ``` [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]]. ``` --- --- The simplest form of scatter is to insert individual elements in a --- tensor by index. For example, say we want to insert 4 scattered --- elements in a rank-1 tensor with 8 elements. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterNd1.png" alt /div --- --- In Python, this scatter operation would look like this: --- --- indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, --- 10, 11, 12]) shape = tf.constant([8]) scatter = tf.scatter_nd(indices, --- updates, shape) with tf.Session() as sess: print sess.run(scatter) --- --- The resulting tensor would look like this: --- ---
    [0, 11, 0, 10, 9, 0, 0, 12]
    --- --- We can also, insert entire slices of a higher rank tensor all at once. --- For example, if we wanted to insert two slices in the first dimension --- of a rank-3 tensor with two matrices of new values. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterNd2.png" alt /div --- --- In Python, this scatter operation would look like this: --- --- indices = tf.constant([[0], [2]]) updates = tf.constant([[[5, 5, 5, --- 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[5, 5, 5, 5], [6, 6, --- 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]) shape = tf.constant([4, 4, 4]) --- scatter = tf.scatter_nd(indices, updates, shape) with tf.Session() as --- sess: print sess.run(scatter) --- --- The resulting tensor would look like this: --- ---
    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
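    --- --- The first example above as a Haskell sketch (illustrative; it assumes --- the constant/vector helpers from TensorFlow.Ops and the --- TensorFlow.Core session API):
    ---
    --- ```haskell
    --- import Data.Int (Int32)
    --- import Data.Vector (Vector)
    --- import qualified TensorFlow.Core as TF
    --- import qualified TensorFlow.GenOps.Core as Ops
    --- import qualified TensorFlow.Ops as TF
    ---
    --- -- Scatter four updates into a zero-initialized vector of eight elements.
    --- scatterDemo :: IO (Vector Int32)
    --- scatterDemo = TF.runSession $ do
    ---   let indices = TF.constant (TF.Shape [4, 1]) [4, 3, 1, 7 :: Int32]
    ---       updates = TF.vector [9, 10, 11, 12 :: Int32]
    ---   TF.run (Ops.scatterNd indices updates (TF.vector [8 :: Int32]))
    ---   -- expected: [0,11,0,10,9,0,0,12]
    --- ```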
    -scatterNd :: (TensorType t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor v1 tindices -> Tensor v2 t -> Tensor v3 tindices -> Tensor Value t - --- | A queue that produces elements sorted by the first component value. --- --- Note that the PriorityQueue requires the first component of any --- element to be a scalar int64, in addition to the other elements --- declared by component_types. Therefore calls to Enqueue and --- EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will --- all require (resp. output) one extra entry in their input (resp. --- output) lists. -priorityQueue :: Build (Tensor Ref ByteString) - --- | Forwards the ref tensor `data` to the output port determined by --- pred. --- --- If pred is true, the `data` input is forwarded to --- output_true. Otherwise, the data goes to --- output_false. --- --- See also Switch and Merge. -refSwitch :: (TensorType t) => Tensor Ref t -> Tensor v2 Bool -> Build ((Tensor Ref t, Tensor Ref t)) - --- | Makes its input available to the next iteration. -nextIteration :: (TensorType t) => Tensor v1 t -> Tensor Value t - --- | Makes its input available to the next iteration. -refNextIteration :: (TensorType t) => Tensor Ref t -> Build (Tensor Ref t) - --- | Multiplies slices of two tensors in batches. --- --- Multiplies all slices of Tensor x and y (each --- slice can be viewed as an element of a batch), and arranges the --- individual results in a single output tensor of the same batch size. --- Each of the individual slices can optionally be adjointed (to adjoint --- a matrix means to transpose and conjugate it) before multiplication by --- setting the adj_x or adj_y flag to True, --- which are by default False. --- --- The input tensors x and y are 3-D or higher with --- shape `[..., r_x, c_x]` and `[..., r_y, c_y]`. --- --- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, --- where: --- --- r_o = c_x if adj_x else r_x c_o = r_y if adj_y else c_y --- --- It is computed as: --- --- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) -batchMatMul :: (TensorType t, OneOf '[Complex Double, Complex Float, Int32, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Forwards the indexth element of inputs to --- output. -refSelect :: (TensorType t) => Tensor v1 Int32 -> [Tensor Ref t] -> Build (Tensor Ref t) - --- | Computes the mean of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -mean :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tidx, OneOf '[Int32, Int64] tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t - --- | Adds sparse updates to a variable reference. --- --- This operation computes --- --- # Scalar indices ref[indices, ...] += updates[...] --- --- # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] --- --- # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] --- += updates[i, ..., j, ...] --- --- This operation outputs ref after the update is done. This --- makes it easier to chain operations that need to use the reset value. --- --- Duplicate entries are handled correctly: if multiple indices --- reference the same location, their contributions add. 
--- --- Requires `updates.shape = indices.shape + ref.shape[1:]`. --- --- style="width:70%; margin:auto; margin-bottom:10px; --- margin-top:20px;" style="width:100%" --- src="../../images/ScatterAdd.png" alt /div -scatterAdd :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, TensorType tindices, OneOf '[Int32, Int64] tindices) => Tensor Ref t -> Tensor v2 tindices -> Tensor v3 t -> Build (Tensor Ref t) - --- | Randomly crop image. --- --- size is a 1-D int64 tensor with 2 elements representing the --- crop height and width. The values must be non negative. --- --- This Op picks a random location in image and crops a --- height by width rectangle from that location. The --- random location is picked so the cropped area will fit inside the --- original image. -randomCrop :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word8, Double, Float] t) => Tensor v1 t -> Tensor v2 Int64 -> Build (Tensor Value t) - --- | Exits the current frame to its parent frame. --- --- Exit makes its input `data` available to the parent frame. -refExit :: (TensorType t) => Tensor Ref t -> Build (Tensor Ref t) - --- | Produce a string tensor that encodes the state of a Reader. --- --- Not all Readers support being serialized, so this can produce an --- Unimplemented error. -readerSerializeState :: Tensor Ref ByteString -> Build (Tensor Value ByteString) - --- | Computes the gradient for the tanh of x wrt its input. --- --- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and --- dy is the corresponding input gradient. -tanhGrad :: (TensorType t, OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Returns the element-wise max of two SparseTensors. --- --- Assumes the two SparseTensors have the same shape, i.e., no --- broadcasting. -sparseSparseMaximum :: (TensorType t, OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 Int64 -> Tensor v5 t -> Tensor v6 Int64 -> (Tensor Value Int64, Tensor Value t) - --- | Decode the first frame of a GIF-encoded image to a uint8 tensor. --- --- GIF with frame or transparency compression are not supported convert --- animated GIF from compressed to uncompressed by: --- --- convert $src.gif -coalesce $dst.gif -decodeGif :: Tensor v1 ByteString -> Tensor Value Word8 +sub :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +sub' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Return substrings from Tensor of strings. -- @@ -5631,26 +6402,681 @@ decodeGif :: Tensor v1 ByteString -> Tensor Value Word8 -- ``` input = bthirteen position = [1, 5, 7] length = [3, 2, 1] -- -- output = [bhir, bee, b'n"] ``` -substr :: (TensorType t, OneOf '[Int32, Int64] t) => Tensor v1 ByteString -> Tensor v2 t -> Tensor v3 t -> Tensor Value ByteString +substr :: (OneOf '[Int32, Int64] t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString +substr' :: (OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 t -> Tensor Build ByteString --- | Updates the table to associates keys with values. +-- | Computes the sum of elements across dimensions of a tensor. -- --- The tensor keys must be of the same type as the keys of the --- table. 
-lookupTableInsert :: (TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v2 tin -> Tensor v3 tout -> Build (ControlNode)
+-- Reduces input along the dimensions given in
+-- reduction_indices. Unless keep_dims is true, the
+-- rank of the tensor is reduced by 1 for each entry in
+-- reduction_indices. If keep_dims is true, the reduced
+-- dimensions are retained with length 1.
+sum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t
+sum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t

--- | Component-wise divides a SparseTensor by a dense Tensor.
+-- | Computes the singular value decompositions of one or more matrices.
+--
+-- Computes the SVD of each inner matrix in input such that
+-- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
+-- transpose(v[..., :, :])`
+--
+-- ```prettyprint # a is a tensor containing a batch of matrices. # s is
+-- a tensor of singular values for each matrix. # u is the tensor
+-- containing the left singular vectors for each matrix. # v is the
+-- tensor containing the right singular vectors for each matrix. s, u, v
+-- = svd(a) s, _, _ = svd(a, compute_uv=False) ```
+svd :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
+svd' :: (OneOf '[Complex Double, Complex Float, Double, Float] t) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build t, Tensor Build t)
+
+-- | Forwards `data` to the output port determined by pred.
+--
+-- If pred is true, the `data` input is forwarded to
+-- output_true. Otherwise, the data goes to
+-- output_false.
+--
+-- See also RefSwitch and Merge.
+switch :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t)
+switch' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Bool -> (Tensor Build t, Tensor Build t)
+
+-- | A Reader that outputs the records from a TensorFlow Records file.
+tFRecordReader :: (MonadBuild m') => m' (Tensor Ref ByteString)
+tFRecordReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString)
+
+-- | A Reader that outputs the records from a TensorFlow Records file.
+tFRecordReaderV2 :: (MonadBuild m') => m' (ResourceHandle)
+tFRecordReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle)
+
+-- | Read SparseTensors from a SparseTensorsMap and
+-- concatenate them.
+--
+-- The input sparse_handles must be an int64 matrix of
+-- shape `[N, 1]` where N is the minibatch size and the rows
+-- correspond to the output handles of AddSparseToTensorsMap or
+-- AddManySparseToTensorsMap. The ranks of the original
+-- SparseTensor objects that went into the given input ops must
+-- all match. When the final SparseTensor is created, it has
+-- rank one higher than the ranks of the incoming SparseTensor
+-- objects (they have been concatenated along a new row dimension on the
+-- left).
+--
+-- The output SparseTensor object's shape values for all
+-- dimensions but the first are the max across the input
+-- SparseTensor objects' shape values for the corresponding
+-- dimensions. Its first shape value is N, the minibatch size.
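The max-across-shapes rule in the paragraph above reduces to a two-line pure function; the sketch below is illustrative only (`combinedShape` is not part of the generated API), and it reproduces the worked example that follows.

```haskell
-- Combined shape of N stacked SparseTensors: the minibatch size N first,
-- then the elementwise max of the input shapes.
combinedShape :: Int -> [[Int]] -> [Int]
combinedShape n shapes = n : foldr1 (zipWith max) shapes

-- combinedShape 2 [[50], [30]] == [2, 50]
```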
+-- +-- The input SparseTensor objects' indices are assumed ordered +-- in standard lexicographic order. If this is not the case, after this +-- step run SparseReorder to restore index ordering. +-- +-- For example, if the handles represent an input, which is a `[2, 3]` +-- matrix representing two original SparseTensor objects: +-- +-- ``` index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] ``` +-- +-- and +-- +-- ``` index = [ 2] [10] values = [4, 5] shape = [30] ``` +-- +-- then the final SparseTensor will be: +-- +-- ``` index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] +-- shape = [2 50] ``` +takeManySparseFromTensorsMap :: (MonadBuild m', TensorType dtype) => Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) +takeManySparseFromTensorsMap' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Int64 -> m' ((Tensor Value Int64, Tensor Value dtype, Tensor Value Int64)) + +-- | Computes tan of x element-wise. +tan :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +tan' :: (OneOf '[Complex Double, Complex Float, Int32, Int64, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes hyperbolic tangent of x element-wise. +tanh :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor Build t +tanh' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Computes the gradient for the tanh of x wrt its input. +-- +-- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and +-- dy is the corresponding input gradient. +tanhGrad :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +tanhGrad' :: (OneOf '[Complex Double, Complex Float, Word16, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Returns a tensor that may be mutated, but only persists within a +-- single step. +-- +-- This is an experimental op for internal use only and it is possible to +-- use this op in unsafe ways. DO NOT USE unless you fully understand the +-- risks. +-- +-- It is the caller's responsibility to ensure that ref is +-- eventually passed to a matching DestroyTemporaryVariable op +-- after all other uses have completed. +-- +-- Outputs a ref to the tensor state so it may be read or modified. +-- +-- E.g. var = state_ops._temporary_variable([1, 2], types.float_) +-- var_name = var.op.name var = state_ops.assign(var, [[4.0, 5.0]]) var = +-- state_ops.assign_add(var, [[6.0, 7.0]]) final = +-- state_ops._destroy_temporary_variable(var, var_name=var_name) +temporaryVariable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) +temporaryVariable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) +tensorArray :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString) +tensorArray' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Ref ByteString) +tensorArrayClose :: (MonadBuild m') => Tensor Ref ByteString -> m' (ControlNode) +tensorArrayClose' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> m' (ControlNode) + +-- | Deprecated. 
Use TensorArrayCloseV3 +tensorArrayCloseV2 :: (MonadBuild m') => Tensor v'1 ByteString -> m' (ControlNode) +tensorArrayCloseV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> m' (ControlNode) + +-- | Delete the TensorArray from its resource container. This enables +-- +-- the user to close and release the resource in the middle of a +-- step/run. +tensorArrayCloseV3 :: (MonadBuild m') => ResourceHandle -> m' (ControlNode) +tensorArrayCloseV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (ControlNode) +tensorArrayConcat :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) +tensorArrayConcat' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) + +-- | Deprecated. Use TensorArrayConcatV3 +tensorArrayConcatV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64) +tensorArrayConcatV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> (Tensor Build dtype, Tensor Build Int64) + +-- | Concat the elements from the TensorArray into value value. +-- +-- Takes T elements of shapes +-- +-- ``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 +-- x ...) ``` +-- +-- and concatenates them into a Tensor of shape: +-- +-- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` +-- +-- All elements must have the same shape (excepting the first dimension). +tensorArrayConcatV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) +tensorArrayConcatV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((Tensor Value dtype, Tensor Value Int64)) +tensorArrayGather :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) +tensorArrayGather' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) + +-- | Deprecated. Use TensorArrayGatherV3 +tensorArrayGatherV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype +tensorArrayGatherV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype + +-- | Gather specific elements from the TensorArray into output +-- value. +-- +-- All elements selected by indices must have the same shape. +tensorArrayGatherV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) +tensorArrayGatherV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype) +tensorArrayGrad :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString) +tensorArrayGrad' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Ref ByteString) + +-- | Deprecated. Use TensorArrayGradV3 +tensorArrayGradV2 :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString) +tensorArrayGradV2' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> m' (Tensor Value ByteString) + +-- | Creates a TensorArray for storing the gradients of values in the given +-- handle. 
+-- +-- If the given TensorArray gradient already exists, returns a reference +-- to it. +-- +-- Locks the size of the original TensorArray by disabling its dynamic +-- size flag. -- --
---
--- *Limitation*: this Op only broadcasts the dense side to the sparse
--- side, but not the other direction.
+--
+-- **A note about the input flow_in:**
+--
    -sparseDenseCwiseDiv :: (TensorType t, OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v1 Int64 -> Tensor v2 t -> Tensor v3 Int64 -> Tensor v4 t -> Tensor Value t - --- | Replaces the contents of the table with the specified keys and values. -- --- The tensor keys must be of the same type as the keys of the --- table. The tensor values must be of the type of the table --- values. -lookupTableImport :: (TensorType tin, TensorType tout) => Tensor Ref ByteString -> Tensor v2 tin -> Tensor v3 tout -> Build (ControlNode) +-- The handle flow_in forces the execution of the gradient lookup to +-- occur only after certain other operations have occurred. For example, +-- when the forward TensorArray is dynamically sized, writes to this +-- TensorArray may resize the object. The gradient TensorArray is +-- statically sized based on the size of the forward TensorArray when +-- this operation executes. Furthermore, the size of the forward +-- TensorArray is frozen by this call. As a result, the flow is used to +-- ensure that the call to generate the gradient TensorArray only happens +-- after all writes are executed. +-- +-- In the case of dynamically sized TensorArrays, gradient computation +-- should only be performed on read operations that have themselves been +-- chained via flow to occur only after all writes have executed. That +-- way the final size of the forward TensorArray is known when this +-- operation is called. +-- +--
+--
+-- **A note about the source attribute:**
+--
+-- TensorArray gradient calls use an accumulator TensorArray object. If
+-- multiple gradients are calculated and run in the same session, the
+-- multiple gradient nodes may accidentally flow through the same
+-- accumulator TensorArray. This double counts and generally breaks the
+-- TensorArray gradient flow.
+--
+-- The solution is to identify which gradient call this particular
+-- TensorArray gradient is being called in. This is performed by
+-- identifying a unique string (e.g. "gradients", "gradients_1", ...)
+-- from the input gradient Tensor's name. This string is used as a suffix
+-- when creating the TensorArray gradient object here (the attribute
+-- source).
+--
+-- The attribute source is added as a suffix to the forward
+-- TensorArray's name when performing the creation / lookup, so that each
+-- separate gradient calculation gets its own TensorArray accumulator.
+tensorArrayGradV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float))
+tensorArrayGradV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' ((ResourceHandle, Tensor Value Float))
+tensorArrayPack :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype)
+tensorArrayPack' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value dtype)
+tensorArrayRead :: (MonadBuild m', TensorType dtype) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
+tensorArrayRead' :: (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
+
+-- | Deprecated. Use TensorArrayReadV3
+tensorArrayReadV2 :: (TensorType dtype) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
+tensorArrayReadV2' :: (TensorType dtype) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 Float -> Tensor Build dtype
+
+-- | Read an element from the TensorArray into output value.
+tensorArrayReadV3 :: (MonadBuild m', TensorType dtype) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
+tensorArrayReadV3' :: (MonadBuild m', TensorType dtype) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 Float -> m' (Tensor Value dtype)
+tensorArrayScatter :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
+tensorArrayScatter' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
+
+-- | Deprecated. Use TensorArrayScatterV3
+tensorArrayScatterV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
+tensorArrayScatterV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
+
+-- | Scatter the data from the input value into specific TensorArray
+-- elements.
+--
+-- indices must be a vector, and its length must match the first dim
+-- of value.
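The source-attribute scheme described above boils down to deriving a per-gradient-call suffix from the gradient tensor's name. A toy rendering, with the caveat that both the function name and the `@` separator are assumptions made for this sketch rather than the documented API (the signatures for tensorArrayScatterV3 follow below):

```haskell
-- Accumulator name: forward TensorArray name plus the leading component
-- of the gradient tensor's name ("gradients", "gradients_1", ...).
gradientArrayName :: String -> String -> String
gradientArrayName forwardName gradTensorName =
    forwardName ++ "@" ++ takeWhile (/= '/') gradTensorName

-- gradientArrayName "my_ta" "gradients_1/loss/grad" == "my_ta@gradients_1"
```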
+tensorArrayScatterV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) +tensorArrayScatterV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float) +tensorArraySize :: (MonadBuild m') => Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32) +tensorArraySize' :: (MonadBuild m') => OpParams -> Tensor Ref ByteString -> Tensor v'2 Float -> m' (Tensor Value Int32) + +-- | Deprecated. Use TensorArraySizeV3 +tensorArraySizeV2 :: Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32 +tensorArraySizeV2' :: OpParams -> Tensor v'1 ByteString -> Tensor v'2 Float -> Tensor Build Int32 + +-- | Get the current size of the TensorArray. +tensorArraySizeV3 :: (MonadBuild m') => ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32) +tensorArraySizeV3' :: (MonadBuild m') => OpParams -> ResourceHandle -> Tensor v'2 Float -> m' (Tensor Value Int32) +tensorArraySplit :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) +tensorArraySplit' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) + +-- | Deprecated. Use TensorArraySplitV3 +tensorArraySplitV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float +tensorArraySplitV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> Tensor Build Float + +-- | Split the data from the input value into TensorArray elements. +-- +-- Assuming that lengths takes on values +-- +-- ```(n0, n1, ..., n(T-1))``` +-- +-- and that value has shape +-- +-- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, +-- +-- this splits values into a TensorArray with T tensors. +-- +-- TensorArray index t will be the subtensor of values with starting +-- position +-- +-- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)``` +-- +-- and having size +-- +-- ```nt x d0 x d1 x ...``` +tensorArraySplitV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) +tensorArraySplitV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 t -> Tensor v'3 Int64 -> Tensor v'4 Float -> m' (Tensor Value Float) +tensorArrayUnpack :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float) +tensorArrayUnpack' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 t -> Tensor v'3 Float -> m' (Tensor Value Float) + +-- | Deprecated. Use TensorArrayV3 +tensorArrayV2 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString) +tensorArrayV2' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' (Tensor Value ByteString) + +-- | An array of Tensors of given size, with data written via Write and +-- read +-- +-- via Read or Pack. 
+tensorArrayV3 :: (MonadBuild m') => DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float))
+tensorArrayV3' :: (MonadBuild m') => OpParams -> DataType -> Tensor v'1 Int32 -> m' ((ResourceHandle, Tensor Value Float))
+tensorArrayWrite :: (MonadBuild m', TensorType t) => Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
+tensorArrayWrite' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
+
+-- | Deprecated. Use TensorArrayWriteV3
+tensorArrayWriteV2 :: (TensorType t) => Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
+tensorArrayWriteV2' :: (TensorType t) => OpParams -> Tensor v'1 ByteString -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> Tensor Build Float
+
+-- | Push an element onto the tensor_array.
+tensorArrayWriteV3 :: (MonadBuild m', TensorType t) => ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
+tensorArrayWriteV3' :: (MonadBuild m', TensorType t) => OpParams -> ResourceHandle -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 Float -> m' (Tensor Value Float)
+
+-- | Outputs a Summary protocol buffer with a tensor.
+tensorSummary :: (TensorType t) => Tensor v'1 t -> Tensor Build ByteString
+tensorSummary' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build ByteString
+
+-- | A Reader that outputs the lines of a file delimited by '\n'.
+textLineReader :: (MonadBuild m') => m' (Tensor Ref ByteString)
+textLineReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString)
+
+-- | A Reader that outputs the lines of a file delimited by '\n'.
+textLineReaderV2 :: (MonadBuild m') => m' (ResourceHandle)
+textLineReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle)
+
+-- | Generates labels for candidate sampling with a learned unigram
+-- distribution.
+--
+-- See explanations of candidate sampling and the data formats at
+-- go/candidate-sampling.
+--
+-- For each batch, this op picks a single set of sampled candidate
+-- labels.
+--
+-- The advantages of sampling candidates per-batch are simplicity and the
+-- possibility of efficient dense matrix multiplication. The disadvantage
+-- is that the sampled candidates must be chosen independently of the
+-- context and of the true labels.
+threadUnsafeUnigramCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
+threadUnsafeUnigramCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float)
+
+-- | Constructs a tensor by tiling a given tensor.
+--
+-- This operation creates a new tensor by replicating input
+-- multiples times. The output tensor's i'th dimension has
+-- `input.dims(i) * multiples[i]` elements, and the values of
+-- input are replicated `multiples[i]` times along the
+-- ith dimension. For example, tiling `[a b c d]` by `[2]`
+-- produces `[a b c d a b c d]`.
+tile :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t
+tile' :: (TensorType t, OneOf '[Int32, Int64] tmultiples) => OpParams -> Tensor v'1 t -> Tensor v'2 tmultiples -> Tensor Build t
+
+-- | Returns the gradient of Tile.
+--
+-- Since Tile takes an input and repeats the input
+-- multiples times along each dimension, TileGrad takes
+-- in multiples and aggregates each repeated tile of
+-- input into output.
+tileGrad :: (TensorType t) => Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
+tileGrad' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> Tensor Build t
+
+-- | Finds values and indices of the k largest elements for the
+-- last dimension.
+--
+-- If the input is a vector (rank-1), finds the k largest
+-- entries in the vector and outputs their values and indices as vectors.
+-- Thus `values[j]` is the j-th largest entry in input,
+-- and its index is `indices[j]`.
+--
+-- For matrices (resp. higher rank input), computes the top k
+-- entries in each row (resp. vector along the last dimension). Thus,
+--
+-- values.shape = indices.shape = input.shape[:-1] + [k]
+--
+-- If two elements are equal, the lower-index element appears first.
+--
+-- If k varies dynamically, use TopKV2 below.
+topK :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32)
+topK' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Int64 -> Tensor v'1 t -> (Tensor Build t, Tensor Build Int32)
+
+-- | Finds values and indices of the k largest elements for the
+-- last dimension.
+--
+-- If the input is a vector (rank-1), finds the k largest
+-- entries in the vector and outputs their values and indices as vectors.
+-- Thus `values[j]` is the j-th largest entry in input,
+-- and its index is `indices[j]`.
+--
+-- For matrices (resp. higher rank input), computes the top k
+-- entries in each row (resp. vector along the last dimension). Thus,
+--
+-- values.shape = indices.shape = input.shape[:-1] + [k]
+--
+-- If two elements are equal, the lower-index element appears first.
+topKV2 :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32)
+topKV2' :: (OneOf '[Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 Int32 -> (Tensor Build t, Tensor Build Int32)
+
+-- | Shuffle dimensions of x according to a permutation.
+--
+-- The output y has the same rank as x. The shapes of
+-- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for
+-- i in [0, 1, ..., rank(x) - 1]`
+transpose :: (TensorType t, OneOf '[Int32, Int64] tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
+transpose' :: (TensorType t, OneOf '[Int32, Int64] tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t
+
+-- | Returns x / y element-wise for integer types.
+--
+-- Truncation designates that negative numbers will round fractional
+-- quantities toward zero. I.e. -7 / 5 = -1. This matches C semantics
+-- but it is different from Python semantics. See FloorDiv for a
+-- division function that matches Python semantics.
+--
+-- *NOTE*: TruncateDiv supports broadcasting. More about
+-- broadcasting here
+--
+truncateDiv :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+truncateDiv' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t
+
+-- | Returns element-wise remainder of division. This emulates C
+-- semantics in that the result here is consistent with a truncating
+-- divide, i.e. `truncate(x / y) * y + truncate_mod(x, y) = x`.
+--
+-- *NOTE*: Mod supports broadcasting. More about broadcasting
+-- here
+--
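The truncating/flooring distinction drawn in the last two entries maps directly onto Haskell's `quot`/`rem` versus `div`/`mod`, so it can be sanity-checked in a few lines (the truncateMod signatures follow below):

```haskell
main :: IO ()
main = do
    -- Truncating pair, matching TruncateDiv/TruncateMod (C semantics):
    print (quot (-7) 5, rem (-7) 5)  -- (-1, -2); (-1) * 5 + (-2) == -7
    -- Flooring pair, matching FloorDiv/FloorMod (Python semantics):
    print (div (-7) 5, mod (-7) 5)   -- (-2, 3);  (-2) * 5 + 3    == -7
```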
    +truncateMod :: (OneOf '[Int32, Int64, Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +truncateMod' :: (OneOf '[Int32, Int64, Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Outputs random values from a truncated normal distribution. +-- +-- The generated values follow a normal distribution with mean 0 and +-- standard deviation 1, except that values whose magnitude is more than +-- 2 standard deviations from the mean are dropped and re-picked. +truncatedNormal :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => Tensor v'1 t -> m' (Tensor Value dtype) +truncatedNormal' :: (MonadBuild m', OneOf '[Word16, Double, Float] dtype, OneOf '[Int32, Int64] t) => OpParams -> Tensor v'1 t -> m' (Tensor Value dtype) + +-- | Generates labels for candidate sampling with a uniform distribution. +-- +-- See explanations of candidate sampling and the data formats at +-- go/candidate-sampling. +-- +-- For each batch, this op picks a single set of sampled candidate +-- labels. +-- +-- The advantages of sampling candidates per-batch are simplicity and the +-- possibility of efficient dense matrix multiplication. The disadvantage +-- is that the sampled candidates must be chosen independently of the +-- context and of the true labels. +uniformCandidateSampler :: Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) +uniformCandidateSampler' :: OpParams -> Int64 -> Int64 -> Int64 -> Bool -> Tensor v'1 Int64 -> (Tensor Build Int64, Tensor Build Float, Tensor Build Float) + +-- | Finds unique elements in a 1-D tensor. +-- +-- This operation returns a tensor y containing all of the +-- unique elements of x sorted in the same order that they occur +-- in x. This operation also returns a tensor idx the +-- same size as x that contains the index of each value of +-- x in the unique output y. In other words: +-- +-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` +-- +-- For example: +-- +-- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y, +-- idx = unique(x) y ==> [1, 2, 4, 7, 8] idx ==> [0, 0, 1, 2, 2, 2, +-- 3, 4, 4] ``` +unique :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx) +unique' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx) + +-- | Finds unique elements in a 1-D tensor. +-- +-- This operation returns a tensor y containing all of the +-- unique elements of x sorted in the same order that they occur +-- in x. This operation also returns a tensor idx the +-- same size as x that contains the index of each value of +-- x in the unique output y. Finally, it returns a +-- third tensor count that contains the count of each element of +-- y in x. 
In other words:
+--
+-- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+--
+-- For example:
+--
+-- ```prettyprint # tensor x is [1, 1, 2, 4, 4, 4, 7, 8, 8] y,
+-- idx, count = unique_with_counts(x) y ==> [1, 2, 4, 7, 8] idx ==>
+-- [0, 0, 1, 2, 2, 2, 3, 4, 4] count ==> [2, 1, 3, 1, 2] ```
+uniqueWithCounts :: (TensorType t, OneOf '[Int32, Int64] out_idx) => Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)
+uniqueWithCounts' :: (TensorType t, OneOf '[Int32, Int64] out_idx) => OpParams -> Tensor v'1 t -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx)
+
+-- | Unpacks a given dimension of a rank-R tensor into
+-- num rank-`(R-1)` tensors.
+--
+-- Unpacks num tensors from value by chipping it along
+-- the axis dimension. For example, given a tensor of shape `(A,
+-- B, C, D)`;
+--
+-- If `axis == 0` then the i'th tensor in output is the slice
+-- `value[i, :, :, :]` and each tensor in output will have shape
+-- `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike
+-- split).
+--
+-- If `axis == 1` then the i'th tensor in output is the slice
+-- `value[:, i, :, :]` and each tensor in output will have shape
+-- `(A, C, D)`. Etc.
+--
+-- This is the opposite of pack.
+unpack :: (TensorType t) => Int64 -> Tensor v'1 t -> [Tensor Build t]
+unpack' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> [Tensor Build t]
+
+-- | Computes the sum along segments of a tensor.
+--
+-- Read the section on Segmentation for an explanation of
+-- segments.
+--
+-- Computes a tensor such that `output[i] = sum_{j...} data[j...]` where
+-- the sum is over tuples `j...` such that `segment_ids[j...] == i`.
+-- Unlike SegmentSum, segment_ids need not be sorted
+-- and need not cover all values in the full range of valid values.
+--
+-- If the sum is empty for a given segment ID i, `output[i] =
+-- 0`.
+--
+-- num_segments should equal the number of distinct segment IDs.
+--
+-- (figure: ../../images/UnsortedSegmentSum.png)
+unsortedSegmentSum :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t
+unsortedSegmentSum' :: (OneOf '[Complex Double, Complex Float, Int16, Int32, Int64, Int8, Word16, Word8, Double, Float] t, OneOf '[Int32, Int64] tindices) => OpParams -> Tensor v'1 t -> Tensor v'2 tindices -> Tensor v'3 Int32 -> Tensor Build t
+
+-- | Op is similar to a lightweight Dequeue. The basic functionality is
+-- similar to dequeue with many fewer capabilities and options. This Op
+-- is optimized for performance.
+unstage :: (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes)
+unstage' :: (MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes)
+
+-- | Creates a handle to a Variable resource.
+varHandleOp :: (MonadBuild m') => DataType -> Shape -> m' (ResourceHandle)
+varHandleOp' :: (MonadBuild m') => OpParams -> DataType -> Shape -> m' (ResourceHandle)
+
+-- | Checks whether a resource handle-based variable has been initialized.
+varIsInitializedOp :: (MonadBuild m') => ResourceHandle -> m' (Tensor Value Bool)
+varIsInitializedOp' :: (MonadBuild m') => OpParams -> ResourceHandle -> m' (Tensor Value Bool)
+
+-- | Use VariableV2 instead.
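Returning to unsortedSegmentSum above: its summation rule, including the empty-segment-yields-zero case, has a direct list-comprehension model (illustrative only, not the op itself):

```haskell
unsortedSegmentSumModel :: Int -> [Int] -> [Double] -> [Double]
unsortedSegmentSumModel numSegments segmentIds xs =
    [ sum [ x | (i, x) <- zip segmentIds xs, i == s ]
    | s <- [0 .. numSegments - 1] ]

-- unsortedSegmentSumModel 3 [0, 2, 0] [1, 2, 3] == [4.0, 0.0, 2.0]
```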
+variable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) +variable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) + +-- | Holds state in the form of a tensor that persists across steps. +-- +-- Outputs a ref to the tensor state so it may be read or modified. +-- TODO(zhifengc/mrry): Adds a pointer to a more detail document about +-- sharing states in tensorflow. +variableV2 :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) +variableV2' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) + +-- | Returns locations of true values in a boolean tensor. +-- +-- This operation returns the coordinates of true elements in +-- input. The coordinates are returned in a 2-D tensor where the +-- first dimension (rows) represents the number of true elements, and the +-- second dimension (columns) represents the coordinates of the true +-- elements. Keep in mind, the shape of the output tensor can vary +-- depending on how many true values there are in input. Indices +-- are output in row-major order. +-- +-- For example: +-- +-- ```prettyprint # input tensor is [[True, False] # [True, +-- False]] # input has two true values, so output has two +-- coordinates. # input has rank of 2, so coordinates have two +-- indices. where(input) ==> [[0, 0], [1, 0]] +-- +-- # input tensor is [[[True, False] # [True, False]] # [[False, +-- True] # [False, True]] # [[False, False] # [False, True]]] # +-- input has 5 true values, so output has 5 coordinates. # +-- input has rank of 3, so coordinates have three indices. +-- where(input) ==> [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, +-- 1, 1]] ``` +where' :: Tensor v'1 Bool -> Tensor Build Int64 +where'' :: OpParams -> Tensor v'1 Bool -> Tensor Build Int64 + +-- | A Reader that outputs the entire contents of a file as a value. +-- +-- To use, enqueue filenames in a Queue. The output of ReaderRead will be +-- a filename (key) and the contents of that file (value). +wholeFileReader :: (MonadBuild m') => m' (Tensor Ref ByteString) +wholeFileReader' :: (MonadBuild m') => OpParams -> m' (Tensor Ref ByteString) + +-- | A Reader that outputs the entire contents of a file as a value. +-- +-- To use, enqueue filenames in a Queue. The output of ReaderRead will be +-- a filename (key) and the contents of that file (value). +wholeFileReaderV2 :: (MonadBuild m') => m' (ResourceHandle) +wholeFileReaderV2' :: (MonadBuild m') => OpParams -> m' (ResourceHandle) + +-- | Writes contents to the file at input filename. Creates file if not +-- existing. +writeFile :: (MonadBuild m') => Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) +writeFile' :: (MonadBuild m') => OpParams -> Tensor v'1 ByteString -> Tensor v'2 ByteString -> m' (ControlNode) + +-- | Returns a tensor of zeros with the same shape and type as x. +zerosLike :: (TensorType t) => Tensor v'1 t -> Tensor Build t +zerosLike' :: (TensorType t) => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Compute the Hurwitz zeta function \(zeta(x, q)\). +-- +-- The Hurwitz zeta function is defined as: +-- +-- ``` zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x} ``` +zeta :: (OneOf '[Double, Float] t) => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +zeta' :: (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | A graph node which represents an argument to a function. 
+_Arg :: (MonadBuild m', TensorType t) => Int64 -> m' (Tensor Value t) +_Arg' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> m' (Tensor Value t) + +-- | Converts an array of tensors to a list of tensors. +_ArrayToList :: (TensorType t, TensorTypes out_types) => [Tensor v'1 t] -> TensorList (Build) out_types +_ArrayToList' :: (TensorType t, TensorTypes out_types) => OpParams -> [Tensor v'1 t] -> TensorList (Build) out_types + +-- | Cast x of type SrcT to y of DstT. +-- +-- _HostCast requires its input and produces its output in host memory. +_HostCast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT +_HostCast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT + +-- | Receives the named tensor from send_device on recv_device. +-- +-- _HostRecv requires its input on host memory whereas _Recv requires its +-- input on device memory. +_HostRecv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type) +_HostRecv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type) + +-- | Sends the named tensor from send_device to recv_device. +-- +-- _HostSend requires its input on host memory whereas _Send requires its +-- input on device memory. +_HostSend :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) +_HostSend' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) + +-- | Converts a list of tensors to an array of tensors. +_ListToArray :: (TensorTypes tin, TensorType t) => Int64 -> TensorList (v'1) tin -> [Tensor Build t] +_ListToArray' :: (TensorTypes tin, TensorType t) => OpParams -> Int64 -> TensorList (v'1) tin -> [Tensor Build t] + +-- | Creates an empty Tensor with shape shape and type +-- dtype. +-- +-- The memory can optionally be initialized. This is usually useful in +-- conjunction with inplace operations. +_ParallelConcatStart :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Value dtype) +_ParallelConcatStart' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Value dtype) + +-- | Updates input value at loc with update. +-- +-- If you use this function you will almost certainly want to add a +-- control dependency as done in the implementation of parallel_stack to +-- avoid race conditions. +_ParallelConcatUpdate :: (TensorType t) => Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +_ParallelConcatUpdate' :: (TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Receives the named tensor from send_device on recv_device. +_Recv :: (MonadBuild m', TensorType tensor_type) => Int64 -> m' (Tensor Value tensor_type) +_Recv' :: (MonadBuild m', TensorType tensor_type) => OpParams -> Int64 -> m' (Tensor Value tensor_type) + +-- | A graph node which represents a return value of a function. +_Retval :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) +_Retval' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) + +-- | Sends the named tensor from send_device to recv_device. 
+_Send :: (MonadBuild m', TensorType t) => Int64 -> Tensor v'1 t -> m' (ControlNode) +_Send' :: (MonadBuild m', TensorType t) => OpParams -> Int64 -> Tensor v'1 t -> m' (ControlNode) diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/TensorFlow-Logging.html b/docs/haddock/tensorflow-logging-0.1.0.0/TensorFlow-Logging.html new file mode 100644 index 0000000..17fa71a --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/TensorFlow-Logging.html @@ -0,0 +1,21 @@ +TensorFlow.Logging

    tensorflow-logging-0.1.0.0: TensorBoard related functionality.

Safe Haskell: None
Language: Haskell2010

    TensorFlow.Logging

    Description

TensorBoard Summary generation. Provides type safe wrappers around raw string emitting CoreOps.

    Example use:

    -- Call summary functions while constructing the graph.
    +createModel = do
    +  loss <- -- ...
+  TF.scalarSummary "loss" loss
    +
    +-- Write summaries to an EventWriter.
    +train = TF.withEventWriter "/path/to/logs" $ \eventWriter -> do
+    summaryTensor <- TF.build TF.mergeAllSummaries
    +    forM_ [1..] $ \step -> do
+        if step `mod` 100 == 0
    +            then do
    +                ((), summaryBytes) <- TF.run (trainStep, summaryTensor)
    +                let summary = decodeMessageOrDie (TF.unScalar summaryBytes)
    +                TF.logSummary eventWriter step summary
    +            else TF.run_ trainStep

    Documentation

    data EventWriter

    Handle for logging TensorBoard events safely from multiple threads.

    withEventWriter

    Arguments

    :: (MonadIO m, MonadMask m) 
    => FilePath

    logdir. Local filesystem directory where event file will be written.

    -> (EventWriter -> m a) 
    -> m a 

    Writes Event protocol buffers to event files.

    logEvent :: MonadIO m => EventWriter -> Event -> m ()

    Logs the given Event protocol buffer.

    logSummary :: MonadIO m => EventWriter -> Int64 -> Summary -> m ()

Logs the given Summary event with an optional global step (use 0 if not applicable).

    type SummaryTensor = Tensor Value ByteString

    Synonym for the tensors that return serialized Summary proto.

    histogramSummary :: (MonadBuild m, TensorType t, t /= ByteString, t /= Bool) => ByteString -> Tensor v t -> m ()

Adds a histogramSummary node. The tag argument is intentionally limited to a single value for simplicity.

    scalarSummary :: (TensorType t, t /= ByteString, t /= Bool, MonadBuild m) => ByteString -> Tensor v t -> m ()

    Adds a scalarSummary node.

    mergeAllSummaries :: MonadBuild m => m SummaryTensor

    Merge all summaries accumulated in the Build into one summary.
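A condensed sketch of the API above, assuming a Summary protocol buffer is already in hand (normally obtained via mergeAllSummaries and a session run, as in the example at the top of this page); `summaryProto` here is a stand-in, not a real value:

```haskell
import TensorFlow.Logging (withEventWriter, logSummary)

logOneSummary :: IO ()
logOneSummary = withEventWriter "/tmp/tf-logs" $ \eventWriter ->
    logSummary eventWriter 42 summaryProto  -- 42 is the global step
  where
    summaryProto = error "stand-in for a decoded Summary proto"
```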

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-logging-0.1.0.0/doc-index.html new file mode 100644 index 0000000..08e92a2 --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-logging-0.1.0.0: TensorBoard related functionality. (Index)

    tensorflow-logging-0.1.0.0: TensorBoard related functionality.

    Index

    EventWriterTensorFlow.Logging
    histogramSummaryTensorFlow.Logging
    logEventTensorFlow.Logging
    logSummaryTensorFlow.Logging
    mergeAllSummariesTensorFlow.Logging
    scalarSummaryTensorFlow.Logging
    SummaryTensorTensorFlow.Logging
    withEventWriterTensorFlow.Logging
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/frames.html b/docs/haddock/tensorflow-logging-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-logging-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+ parent.window.synopsis.location.replace(filename); + } else { + parent.window.synopsis.location = filename; + } + } +} + +function addMenuItem(html) { + var menu = document.getElementById("page-menu"); + if (menu) { + var btn = menu.firstChild.cloneNode(false); + btn.innerHTML = html; + menu.appendChild(btn); + } +} + +function adjustForFrames() { + var bodyCls; + + if (parent.location.href == window.location.href) { + // not in frames, so add Frames button + addMenuItem("Frames"); + bodyCls = "no-frame"; + } + else { + bodyCls = "in-frame"; + } + addClass(document.body, bodyCls); +} + +function reframe() { + setCookie("haddock-reframe", document.URL); + window.location = "frames.html"; +} + +function postReframe() { + var s = getCookie("haddock-reframe"); + if (s) { + parent.window.main.location = s; + clearCookie("haddock-reframe"); + } +} + +function styles() { + var i, a, es = document.getElementsByTagName("link"), rs = []; + for (i = 0; a = es[i]; i++) { + if(a.rel.indexOf("style") != -1 && a.title) { + rs.push(a); + } + } + return rs; +} + +function addStyleMenu() { + var as = styles(); + var i, a, btns = ""; + for(i=0; a = as[i]; i++) { + btns += "
  • " + + a.title + "
  • " + } + if (as.length > 1) { + var h = "
    " + + "Style ▾" + + "
      " + btns + "
    " + + "
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-logging-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000..0ff8579 Binary files /dev/null and b/docs/haddock/tensorflow-logging-0.1.0.0/hslogo-16.png differ diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-logging-0.1.0.0/index-frames.html new file mode 100644 index 0000000..0357595 --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/index-frames.html @@ -0,0 +1,4 @@ +tensorflow-logging-0.1.0.0: TensorBoard related functionality. \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/index.html b/docs/haddock/tensorflow-logging-0.1.0.0/index.html new file mode 100644 index 0000000..4a5d4ad --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-logging-0.1.0.0: TensorBoard related functionality.

    tensorflow-logging-0.1.0.0: TensorBoard related functionality.

    tensorflow-logging-0.1.0.0: TensorBoard related functionality.

    Please see README.md

    Modules

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/mini_TensorFlow-Logging.html b/docs/haddock/tensorflow-logging-0.1.0.0/mini_TensorFlow-Logging.html new file mode 100644 index 0000000..67af259 --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/mini_TensorFlow-Logging.html @@ -0,0 +1,4 @@ +TensorFlow.Logging

    TensorFlow.Logging

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/minus.gif b/docs/haddock/tensorflow-logging-0.1.0.0/minus.gif new file mode 100644 index 0000000..1deac2f Binary files /dev/null and b/docs/haddock/tensorflow-logging-0.1.0.0/minus.gif differ diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/ocean.css b/docs/haddock/tensorflow-logging-0.1.0.0/ocean.css new file mode 100644 index 0000000..1110b40 --- /dev/null +++ b/docs/haddock/tensorflow-logging-0.1.0.0/ocean.css @@ -0,0 +1,600 @@ +/* @group Fundamentals */ + +* { margin: 0; padding: 0 } + +/* Is this portable? */ +html { + background-color: white; + width: 100%; + height: 100%; +} + +body { + background: white; + color: black; + text-align: left; + min-height: 100%; + position: relative; +} + +p { + margin: 0.8em 0; +} + +ul, ol { + margin: 0.8em 0 0.8em 2em; +} + +dl { + margin: 0.8em 0; +} + +dt { + font-weight: bold; +} +dd { + margin-left: 2em; +} + +a { text-decoration: none; } +a[href]:link { color: rgb(196,69,29); } +a[href]:visited { color: rgb(171,105,84); } +a[href]:hover { text-decoration:underline; } + +/* @end */ + +/* @group Fonts & Sizes */ + +/* Basic technique & IE workarounds from YUI 3 + For reasons, see: + http://yui.yahooapis.com/3.1.1/build/cssfonts/fonts.css + */ + +body { + font:13px/1.4 sans-serif; + *font-size:small; /* for IE */ + *font:x-small; /* for IE in quirks mode */ +} + +h1 { font-size: 146.5%; /* 19pt */ } +h2 { font-size: 131%; /* 17pt */ } +h3 { font-size: 116%; /* 15pt */ } +h4 { font-size: 100%; /* 13pt */ } +h5 { font-size: 100%; /* 13pt */ } + +select, input, button, textarea { + font:99% sans-serif; +} + +table { + font-size:inherit; + font:100%; +} + +pre, code, kbd, samp, tt, .src { + font-family:monospace; + *font-size:108%; + line-height: 124%; +} + +.links, .link { + font-size: 85%; /* 11pt */ +} + +#module-header .caption { + font-size: 182%; /* 24pt */ +} + +.info { + font-size: 85%; /* 11pt */ +} + +#table-of-contents, #synopsis { + /* font-size: 85%; /* 11pt */ +} + + +/* @end */ + +/* @group Common */ + +.caption, h1, h2, h3, h4, h5, h6 { + font-weight: bold; + color: rgb(78,98,114); + margin: 0.8em 0 0.4em; +} + +* + h1, * + h2, * + h3, * + h4, * + h5, * + h6 { + margin-top: 2em; +} + +h1 + h2, h2 + h3, h3 + h4, h4 + h5, h5 + h6 { + margin-top: inherit; +} + +ul.links { + list-style: none; + text-align: left; + float: right; + display: inline-table; + margin: 0 0 0 1em; +} + +ul.links li { + display: inline; + border-left: 1px solid #d5d5d5; + white-space: nowrap; + padding: 0; +} + +ul.links li a { + padding: 0.2em 0.5em; +} + +.hide { display: none; } +.show { display: inherit; } +.clear { clear: both; } + +.collapser { + background-image: url(minus.gif); + background-repeat: no-repeat; +} +.expander { + background-image: url(plus.gif); + background-repeat: no-repeat; +} +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} +.collapser, .expander { + padding-left: 14px; + margin-left: -14px; + cursor: pointer; +} + +pre { + padding: 0.25em; + margin: 0.8em 0; + background: rgb(229,237,244); + overflow: auto; + border-bottom: 0.25em solid white; + /* white border adds some space below the box to compensate + for visual extra space that paragraphs have between baseline + and the bounding box */ +} + +.src { + background: #f0f0f0; + padding: 0.2em 0.5em; +} + +.keyword { font-weight: normal; } +.def { font-weight: bold; } + + +/* @end */ + +/* @group Page Structure */ + +#content { + margin: 0 auto; + padding: 0 2em 6em; +} 
+ +#package-header { + background: rgb(41,56,69); + border-top: 5px solid rgb(78,98,114); + color: #ddd; + padding: 0.2em; + position: relative; + text-align: left; +} + +#package-header .caption { + background: url(hslogo-16.png) no-repeat 0em; + color: white; + margin: 0 2em; + font-weight: normal; + font-style: normal; + padding-left: 2em; +} + +#package-header a:link, #package-header a:visited { color: white; } +#package-header a:hover { background: rgb(78,98,114); } + +#module-header .caption { + color: rgb(78,98,114); + font-weight: bold; + border-bottom: 1px solid #ddd; +} + +table.info { + float: right; + padding: 0.5em 1em; + border: 1px solid #ddd; + color: rgb(78,98,114); + background-color: #fff; + max-width: 40%; + border-spacing: 0; + position: relative; + top: -0.5em; + margin: 0 0 0 2em; +} + +.info th { + padding: 0 1em 0 0; +} + +div#style-menu-holder { + position: relative; + z-index: 2; + display: inline; +} + +#style-menu { + position: absolute; + z-index: 1; + overflow: visible; + background: #374c5e; + margin: 0; + text-align: center; + right: 0; + padding: 0; + top: 1.25em; +} + +#style-menu li { + display: list-item; + border-style: none; + margin: 0; + padding: 0; + color: #000; + list-style-type: none; +} + +#style-menu li + li { + border-top: 1px solid #919191; +} + +#style-menu a { + width: 6em; + padding: 3px; + display: block; +} + +#footer { + background: #ddd; + border-top: 1px solid #aaa; + padding: 0.5em 0; + color: #666; + text-align: center; + position: absolute; + bottom: 0; + width: 100%; + height: 3em; +} + +/* @end */ + +/* @group Front Matter */ + +#table-of-contents { + float: right; + clear: right; + background: #faf9dc; + border: 1px solid #d8d7ad; + padding: 0.5em 1em; + max-width: 20em; + margin: 0.5em 0 1em 1em; +} + +#table-of-contents .caption { + text-align: center; + margin: 0; +} + +#table-of-contents ul { + list-style: none; + margin: 0; +} + +#table-of-contents ul ul { + margin-left: 2em; +} + +#description .caption { + display: none; +} + +#synopsis { + display: none; +} + +.no-frame #synopsis { + display: block; + position: fixed; + right: 0; + height: 80%; + top: 10%; + padding: 0; + max-width: 75%; +} + +#synopsis .caption { + float: left; + width: 29px; + color: rgba(255,255,255,0); + height: 110px; + margin: 0; + font-size: 1px; + padding: 0; +} + +#synopsis p.caption.collapser { + background: url(synopsis.png) no-repeat -64px -8px; +} + +#synopsis p.caption.expander { + background: url(synopsis.png) no-repeat 0px -8px; +} + +#synopsis ul { + height: 100%; + overflow: auto; + padding: 0.5em; + margin: 0; +} + +#synopsis ul ul { + overflow: hidden; +} + +#synopsis ul, +#synopsis ul li.src { + background-color: #faf9dc; + white-space: nowrap; + list-style: none; + margin-left: 0; +} + +/* @end */ + +/* @group Main Content */ + +#interface div.top { margin: 2em 0; } +#interface h1 + div.top, +#interface h2 + div.top, +#interface h3 + div.top, +#interface h4 + div.top, +#interface h5 + div.top { + margin-top: 1em; +} +#interface p.src .link { + float: right; + color: #919191; + border-left: 1px solid #919191; + background: #f0f0f0; + padding: 0 0.5em 0.2em; + margin: 0 -0.5em 0 0.5em; +} + +#interface td.src .link { + float: right; + color: #919191; + border-left: 1px solid #919191; + background: #f0f0f0; + padding: 0 0.5em 0.2em; + margin: 0 -0.5em 0 0.5em; +} + +#interface span.fixity { + color: #919191; + border-left: 1px solid #919191; + padding: 0.2em 0.5em 0.2em 0.5em; + margin: 0 -1em 0 1em; +} + +#interface span.rightedge { + 
border-left: 1px solid #919191; + padding: 0.2em 0 0.2em 0; + margin: 0 0 0 1em; +} + +#interface table { border-spacing: 2px; } +#interface td { + vertical-align: top; + padding-left: 0.5em; +} +#interface td.src { + white-space: nowrap; +} +#interface td.doc p { + margin: 0; +} +#interface td.doc p + p { + margin-top: 0.8em; +} + +.clearfix:after { + clear: both; + content: " "; + display: block; + height: 0; + visibility: hidden; +} + +.subs dl { + margin: 0; +} + +.subs dt { + float: left; + clear: left; + display: block; + margin: 1px 0; +} + +.subs dd { + float: right; + width: 90%; + display: block; + padding-left: 0.5em; + margin-bottom: 0.5em; +} + +.subs dd.empty { + display: none; +} + +.subs dd p { + margin: 0; +} + +/* Render short-style data instances */ +.inst ul { + height: 100%; + padding: 0.5em; + margin: 0; +} + +.inst, .inst li { + list-style: none; + margin-left: 1em; +} + +/* Workaround for bug in Firefox (issue #384) */ +.inst-left { + float: left; +} + +.top p.src { + border-top: 1px solid #ccc; +} + +.subs, .doc { + /* use this selector for one level of indent */ + padding-left: 2em; +} + +.warning { + color: red; +} + +.arguments { + margin-top: -0.4em; +} +.arguments .caption { + display: none; +} + +.fields { padding-left: 1em; } + +.fields .caption { display: none; } + +.fields p { margin: 0 0; } + +/* this seems bulky to me +.methods, .constructors { + background: #f8f8f8; + border: 1px solid #eee; +} +*/ + +/* @end */ + +/* @group Auxillary Pages */ + + +.extension-list { + list-style-type: none; + margin-left: 0; +} + +#mini { + margin: 0 auto; + padding: 0 1em 1em; +} + +#mini > * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/plus.gif b/docs/haddock/tensorflow-logging-0.1.0.0/plus.gif new file mode 100644 index 0000000..2d15c14 Binary files /dev/null and b/docs/haddock/tensorflow-logging-0.1.0.0/plus.gif differ diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-logging-0.1.0.0/synopsis.png new file mode 100644 index 0000000..85fb86e Binary files /dev/null and b/docs/haddock/tensorflow-logging-0.1.0.0/synopsis.png differ diff --git a/docs/haddock/tensorflow-logging-0.1.0.0/tensorflow-logging.txt b/docs/haddock/tensorflow-logging-0.1.0.0/tensorflow-logging.txt new file mode 100644 index 0000000..2039cdb --- /dev/null +++ 
b/docs/haddock/tensorflow-logging-0.1.0.0/tensorflow-logging.txt @@ -0,0 +1,61 @@ +-- Hoogle documentation, generated by Haddock +-- See Hoogle, http://www.haskell.org/hoogle/ + + +-- | TensorBoard related functionality. +-- +-- Please see README.md +@package tensorflow-logging +@version 0.1.0.0 + + +-- | TensorBoard Summary generation. Provides type safe wrappers around raw +-- string emitting CoreOps. +-- +-- Example use: +-- +--
    +--   -- Call summary functions while constructing the graph.
    +--   createModel = do
    +--     loss <- -- ...
+--     TF.scalarSummary "loss" loss
    +--   
    +--   -- Write summaries to an EventWriter.
    +--   train = TF.withEventWriter "/path/to/logs" $ \eventWriter -> do
+--       summaryTensor <- TF.mergeAllSummaries
    +--       forM_ [1..] $ \step -> do
+--           if (step `mod` 100 == 0)
    +--               then do
    +--                   ((), summaryBytes) <- TF.run (trainStep, summaryTensor)
    +--                   let summary = decodeMessageOrDie (TF.unScalar summaryBytes)
    +--                   TF.logSummary eventWriter step summary
    +--               else TF.run_ trainStep
    +--   
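+--
+-- In the example above, decodeMessageOrDie comes from Data.ProtoLens, and
+-- trainStep stands for whatever training op the surrounding graph defines.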
    +module TensorFlow.Logging + +-- | Handle for logging TensorBoard events safely from multiple threads. +data EventWriter + +-- | Writes Event protocol buffers to event files. +withEventWriter :: (MonadIO m, MonadMask m) => FilePath -> (EventWriter -> m a) -> m a + +-- | Logs the given Event protocol buffer. +logEvent :: MonadIO m => EventWriter -> Event -> m () + +-- | Logs the given Summary event with an optional global step (use 0 if +-- not applicable). +logSummary :: MonadIO m => EventWriter -> Int64 -> Summary -> m () + +-- | Synonym for the tensors that return serialized Summary proto. +type SummaryTensor = Tensor Value ByteString + +-- | Adds a histogramSummary node. The tag argument is intentionally +-- limited to a single value for simplicity. +histogramSummary :: (MonadBuild m, TensorType t, t /= ByteString, t /= Bool) => ByteString -> Tensor v t -> m () + +-- | Adds a scalarSummary node. +scalarSummary :: (TensorType t, t /= ByteString, t /= Bool, MonadBuild m) => ByteString -> Tensor v t -> m () + +-- | Merge all summaries accumulated in the Build into one +-- summary. +mergeAllSummaries :: MonadBuild m => m SummaryTensor diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html index 2c105db..383104a 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-Parse.html @@ -1,4 +1,4 @@ TensorFlow.Examples.MNIST.Parse

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Examples.MNIST.Parse

    Documentation

    type MNIST = Vector Word8 Source

A single MNIST sample: a vector of pixel intensities, one byte per pixel.

    drawMNIST :: MNIST -> Text Source

    Produces a unicode rendering of the MNIST digit sample.

    checkEndian :: Get () Source

Checks the file's endianness, throwing an error if it's not as expected.

    readMNISTSamples :: FilePath -> IO [MNIST] Source

    Reads an MNIST file and returns a list of samples.

    readMNISTLabels :: FilePath -> IO [Word8] Source

    Reads a list of MNIST labels from a file and returns them.

    \ No newline at end of file +

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Examples.MNIST.Parse

    Documentation

    type MNIST = Vector Word8

A single MNIST sample: a vector of pixel intensities, one byte per pixel.

    drawMNIST :: MNIST -> Text

    Produces a unicode rendering of the MNIST digit sample.

    checkEndian :: Get ()

Checks the file's endianness, throwing an error if it's not as expected.

    readMNISTSamples :: FilePath -> IO [MNIST]

    Reads an MNIST file and returns a list of samples.

    readMNISTLabels :: FilePath -> IO [Word8]

    Reads a list of MNIST labels from a file and returns them.

    readMessageFromFileOrDie :: Message m => FilePath -> IO m
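The pieces above compose into a small end-to-end reader. A minimal sketch, with illustrative file paths (the gzipped idx files can come from tensorflow-mnist-input-data):

import qualified Data.Text.IO as Text
import TensorFlow.Examples.MNIST.Parse

main :: IO ()
main = do
    -- Illustrative paths; any gzipped idx image/label pair works.
    images <- readMNISTSamples "train-images-idx3-ubyte.gz"
    labels <- readMNISTLabels "train-labels-idx1-ubyte.gz"
    case (images, labels) of
        (img:_, lbl:_) -> do
            print lbl                    -- the first sample's label
            Text.putStr (drawMNIST img)  -- its unicode rendering
        _ -> putStrLn "empty data set"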

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html index 630e30a..0079824 100644 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html +++ b/docs/haddock/tensorflow-mnist-0.1.0.0/TensorFlow-Examples-MNIST-TrainedGraph.html @@ -1,4 +1,4 @@ TensorFlow.Examples.MNIST.TrainedGraph

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    Safe HaskellSafe
    LanguageHaskell2010

    TensorFlow.Examples.MNIST.TrainedGraph

    Description

    Paths to test helper files.

    Documentation

    mnistPb :: IO FilePath Source

File containing a TensorFlow serialized proto of MNIST.

    wtsCkpt :: IO ByteString Source

    Files containing pre-trained weights for MNIST.

    biasCkpt :: IO ByteString Source

    Files containing pre-trained weights for MNIST.

    \ No newline at end of file +

    tensorflow-mnist-0.1.0.0: TensorFlow demo application for learning MNIST model.

    Safe HaskellSafe
    LanguageHaskell2010

    TensorFlow.Examples.MNIST.TrainedGraph

    Description

    Paths to test helper files.

    Documentation

    mnistPb :: IO FilePath

File containing a TensorFlow serialized proto of MNIST.

    wtsCkpt :: IO ByteString

    Files containing pre-trained weights for MNIST.

    biasCkpt :: IO ByteString

    Files containing pre-trained weights for MNIST.
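These paths pair naturally with readMessageFromFileOrDie from TensorFlow.Examples.MNIST.Parse. A sketch, assuming the GraphDef message type lives in Proto.Tensorflow.Core.Framework.Graph as the other proto modules here suggest:

import Proto.Tensorflow.Core.Framework.Graph (GraphDef)
import TensorFlow.Examples.MNIST.Parse (readMessageFromFileOrDie)
import TensorFlow.Examples.MNIST.TrainedGraph (mnistPb)

-- Load the serialized MNIST graph as a GraphDef proto.
loadMNISTGraph :: IO GraphDef
loadMNISTGraph = mnistPb >>= readMessageFromFileOrDie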

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html deleted file mode 100644 index 3dc1871..0000000 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/src/Paths_tensorflow_mnist.html +++ /dev/null @@ -1,46 +0,0 @@ - - - - - -.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Paths_tensorflow_mnist.hs - - - -
    module Paths_tensorflow_mnist (
    -    version,
    -    getBinDir, getLibDir, getDataDir, getLibexecDir,
    -    getDataFileName, getSysconfDir
    -  ) where
    -
    -import qualified Control.Exception as Exception
    -import Data.Version (Version(..))
    -import System.Environment (getEnv)
    -import Prelude
    -
    -catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
    -catchIO = Exception.catch
    -
    -version :: Version
    -version = Version [0,1,0,0] []
    -bindir, libdir, datadir, libexecdir, sysconfdir :: FilePath
    -
    -bindir     = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/bin"
    -libdir     = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/lib/x86_64-osx-ghc-7.10.3/tensorflow-mnist-0.1.0.0-2agxd0imrn964MW1mWb4VF"
    -datadir    = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/share/x86_64-osx-ghc-7.10.3/tensorflow-mnist-0.1.0.0"
    -libexecdir = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/libexec"
    -sysconfdir = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/etc"
    -
    -getBinDir, getLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath
    -getBinDir = catchIO (getEnv "tensorflow_mnist_bindir") (\_ -> return bindir)
    -getLibDir = catchIO (getEnv "tensorflow_mnist_libdir") (\_ -> return libdir)
    -getDataDir = catchIO (getEnv "tensorflow_mnist_datadir") (\_ -> return datadir)
    -getLibexecDir = catchIO (getEnv "tensorflow_mnist_libexecdir") (\_ -> return libexecdir)
    -getSysconfDir = catchIO (getEnv "tensorflow_mnist_sysconfdir") (\_ -> return sysconfdir)
    -
    -getDataFileName :: FilePath -> IO FilePath
    -getDataFileName name = do
    -  dir <- getDataDir
    -  return (dir ++ "/" ++ name)
    -
    - diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-Parse.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-Parse.html deleted file mode 100644 index 27b8468..0000000 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-Parse.html +++ /dev/null @@ -1,107 +0,0 @@ - - - - - -src/TensorFlow/Examples/MNIST/Parse.hs - - - -
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE OverloadedStrings #-}
    -{-# LANGUAGE OverloadedLists #-}
    -{-# LANGUAGE TypeSynonymInstances #-}
    -{-# LANGUAGE FlexibleInstances #-}
    -{-# LANGUAGE ViewPatterns #-}
    -
    -module TensorFlow.Examples.MNIST.Parse where
    -
    -import Control.Monad (when, liftM)
    -import Data.Binary.Get (Get, runGet, getWord32be, getLazyByteString)
    -import Data.ByteString.Lazy (toStrict, readFile)
    -import Data.List.Split (chunksOf)
    -import Data.Monoid ((<>))
    -import Data.ProtoLens (Message, decodeMessageOrDie)
    -import Data.Text (Text)
    -import Data.Word (Word8, Word32)
    -import Prelude hiding (readFile)
    -import qualified Codec.Compression.GZip as GZip
    -import qualified Data.ByteString.Lazy as L
    -import qualified Data.Text as Text
    -import qualified Data.Vector as V
    -
--- | A single MNIST sample: a vector of pixel intensities, one byte per pixel.
    -type MNIST = V.Vector Word8
    -
    --- | Produces a unicode rendering of the MNIST digit sample.
    -drawMNIST :: MNIST -> Text
    -drawMNIST = chunk . block
    -  where
    -    block :: V.Vector Word8 -> Text
    -    block (V.splitAt 1 -> ([0], xs)) = " " <> block xs
    -    block (V.splitAt 1 -> ([n], xs)) = c `Text.cons` block xs
    -      where c = "\9617\9618\9619\9608" !! fromIntegral (n `div` 64)
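-      -- \9617\9618\9619\9608 are the Unicode light/medium/dark shade and
-      -- full-block glyphs; higher pixel intensities map to denser glyphs.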
    -    block (V.splitAt 1 -> _)   = ""
    -    chunk :: Text -> Text
    -    chunk "" = "\n"
    -    chunk xs = Text.take 28 xs <> "\n" <> chunk (Text.drop 28 xs)
    -
--- | Checks the file's endianness, throwing an error if it's not as expected.
    -checkEndian :: Get ()
    -checkEndian = do
    -    magic <- getWord32be
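-    -- 2049 and 2051 are the big-endian idx-format magic numbers of the
-    -- MNIST label and image files, respectively.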
    -    when (magic `notElem` ([2049, 2051] :: [Word32])) $
    -        fail "Expected big endian, but image file is little endian."
    -
    --- | Reads an MNIST file and returns a list of samples.
    -readMNISTSamples :: FilePath -> IO [MNIST]
    -readMNISTSamples path = do
    -    raw <- GZip.decompress <$> readFile path
    -    return $ runGet getMNIST raw
    -  where
    -    getMNIST :: Get [MNIST]
    -    getMNIST = do
    -        checkEndian
    -        -- Parse header data.
    -        cnt  <- liftM fromIntegral getWord32be
    -        rows <- liftM fromIntegral getWord32be
    -        cols <- liftM fromIntegral getWord32be
    -        -- Read all of the data, then split into samples.
    -        pixels <- getLazyByteString $ fromIntegral $ cnt * rows * cols
    -        return $ V.fromList <$> chunksOf (rows * cols) (L.unpack pixels)
    -
    --- | Reads a list of MNIST labels from a file and returns them.
    -readMNISTLabels :: FilePath -> IO [Word8]
    -readMNISTLabels path = do
    -    raw <- GZip.decompress <$> readFile path
    -    return $ runGet getLabels raw
    -  where getLabels :: Get [Word8]
    -        getLabels = do
    -            checkEndian
    -            -- Parse header data.
    -            cnt <- liftM fromIntegral getWord32be
    -            -- Read all of the labels.
    -            L.unpack <$> getLazyByteString cnt
    -
    -readMessageFromFileOrDie :: Message m => FilePath -> IO m
    -readMessageFromFileOrDie path = do
    -    pb <- readFile path
    -    return $ decodeMessageOrDie $ toStrict pb
    -
    --- TODO: Write a writeMessageFromFileOrDie and read/write non-lethal
    ---             versions.
    -
    - diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-TrainedGraph.html b/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-TrainedGraph.html deleted file mode 100644 index 881e4d8..0000000 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/src/TensorFlow-Examples-MNIST-TrainedGraph.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - -src-data/TensorFlow/Examples/MNIST/TrainedGraph.hs - - - -
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE OverloadedStrings #-}
    --- | Paths to test helper files.
    -module TensorFlow.Examples.MNIST.TrainedGraph where
    -
    -import Paths_tensorflow_mnist (getDataFileName)
    -import Data.ByteString (ByteString)
    -import Data.ByteString.Char8 (pack)
    -
--- | File containing a TensorFlow serialized proto of MNIST.
    -mnistPb :: IO FilePath
    -mnistPb = getDataFileName "data/MNIST.pb"
    -
    --- | Files containing pre-trained weights for MNIST.
    -wtsCkpt, biasCkpt :: IO ByteString
    -wtsCkpt = pack <$> getDataFileName "data/MNISTWts.ckpt"
    -biasCkpt = pack <$> getDataFileName "data/MNISTBias.ckpt"
    -
    - diff --git a/docs/haddock/tensorflow-mnist-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-mnist-0.1.0.0/src/hscolour.css deleted file mode 100644 index c15919e..0000000 --- a/docs/haddock/tensorflow-mnist-0.1.0.0/src/hscolour.css +++ /dev/null @@ -1,5 +0,0 @@ -.hs-keyglyph, .hs-layout {color: red;} -.hs-keyword {color: blue;} -.hs-comment, .hs-comment a {color: green;} -.hs-str, .hs-chr {color: teal;} -.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html index 22288df..b9a21bf 100644 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html +++ b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/TensorFlow-Examples-MNIST-InputData.html @@ -1,4 +1,4 @@ TensorFlow.Examples.MNIST.InputData

    tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

    Safe HaskellSafe
    LanguageHaskell2010

    TensorFlow.Examples.MNIST.InputData

    Documentation

    trainingImageData :: IO FilePath Source

    Download the files containing the canonical MNIST samples and labels.

    trainingLabelData :: IO FilePath Source

    Download the files containing the canonical MNIST samples and labels.

    \ No newline at end of file +

    tensorflow-mnist-input-data-0.1.0.0: Downloader of input data for training MNIST.

    Safe HaskellSafe
    LanguageHaskell2010

    TensorFlow.Examples.MNIST.InputData

    Documentation

    trainingImageData :: IO FilePath

    Download the files containing the canonical MNIST samples and labels.

    trainingLabelData :: IO FilePath

    Download the files containing the canonical MNIST samples and labels.
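A sketch of feeding these downloads straight into the parsers from TensorFlow.Examples.MNIST.Parse:

import Data.Word (Word8)
import TensorFlow.Examples.MNIST.InputData (trainingImageData, trainingLabelData)
import TensorFlow.Examples.MNIST.Parse (MNIST, readMNISTSamples, readMNISTLabels)

loadTrainingSet :: IO ([MNIST], [Word8])
loadTrainingSet = do
    imgs <- trainingImageData >>= readMNISTSamples
    lbls <- trainingLabelData >>= readMNISTLabels
    return (imgs, lbls)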

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html deleted file mode 100644 index f25ed0e..0000000 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/Paths_tensorflow_mnist_input_data.html +++ /dev/null @@ -1,46 +0,0 @@ - - - - - -.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Paths_tensorflow_mnist_input_data.hs - - - -
    module Paths_tensorflow_mnist_input_data (
    -    version,
    -    getBinDir, getLibDir, getDataDir, getLibexecDir,
    -    getDataFileName, getSysconfDir
    -  ) where
    -
    -import qualified Control.Exception as Exception
    -import Data.Version (Version(..))
    -import System.Environment (getEnv)
    -import Prelude
    -
    -catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
    -catchIO = Exception.catch
    -
    -version :: Version
    -version = Version [0,1,0,0] []
    -bindir, libdir, datadir, libexecdir, sysconfdir :: FilePath
    -
    -bindir     = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/bin"
    -libdir     = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/lib/x86_64-osx-ghc-7.10.3/tensorflow-mnist-input-data-0.1.0.0-JIQTTyi85Nv6pdBnglu33Q"
    -datadir    = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/share/x86_64-osx-ghc-7.10.3/tensorflow-mnist-input-data-0.1.0.0"
    -libexecdir = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/libexec"
    -sysconfdir = "/Users/judahjacobson/tensorflow-haskell/.stack-work/install/x86_64-osx/lts-6.2/7.10.3/etc"
    -
    -getBinDir, getLibDir, getDataDir, getLibexecDir, getSysconfDir :: IO FilePath
    -getBinDir = catchIO (getEnv "tensorflow_mnist_input_data_bindir") (\_ -> return bindir)
    -getLibDir = catchIO (getEnv "tensorflow_mnist_input_data_libdir") (\_ -> return libdir)
    -getDataDir = catchIO (getEnv "tensorflow_mnist_input_data_datadir") (\_ -> return datadir)
    -getLibexecDir = catchIO (getEnv "tensorflow_mnist_input_data_libexecdir") (\_ -> return libexecdir)
    -getSysconfDir = catchIO (getEnv "tensorflow_mnist_input_data_sysconfdir") (\_ -> return sysconfdir)
    -
    -getDataFileName :: FilePath -> IO FilePath
    -getDataFileName name = do
    -  dir <- getDataDir
    -  return (dir ++ "/" ++ name)
    -
    - diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow-Examples-MNIST-InputData.html b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow-Examples-MNIST-InputData.html deleted file mode 100644 index 2dd0400..0000000 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/TensorFlow-Examples-MNIST-InputData.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - -src/TensorFlow/Examples/MNIST/InputData.hs - - - -
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -module TensorFlow.Examples.MNIST.InputData
    -  ( trainingImageData
    -  , trainingLabelData
    -  , testImageData
    -  , testLabelData
    -  ) where
    -
    -import Paths_tensorflow_mnist_input_data (getDataFileName)
    -
    --- | Download the files containing the canonical MNIST samples and labels.
    -trainingImageData, trainingLabelData :: IO FilePath
    -trainingImageData = getDataFileName "train-images-idx3-ubyte.gz"
    -trainingLabelData = getDataFileName "train-labels-idx1-ubyte.gz"
    -
    -testImageData, testLabelData :: IO FilePath
    -testImageData = getDataFileName "t10k-images-idx3-ubyte.gz"
    -testLabelData = getDataFileName "t10k-labels-idx1-ubyte.gz"
    -
    - diff --git a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/hscolour.css deleted file mode 100644 index c15919e..0000000 --- a/docs/haddock/tensorflow-mnist-input-data-0.1.0.0/src/hscolour.css +++ /dev/null @@ -1,5 +0,0 @@ -.hs-keyglyph, .hs-layout {color: red;} -.hs-keyword {color: blue;} -.hs-comment, .hs-comment a {color: green;} -.hs-str, .hs-chr {color: teal;} -.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html b/docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html index fe71916..0004a3a 100644 --- a/docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html +++ b/docs/haddock/tensorflow-nn-0.1.0.0/TensorFlow-NN.html @@ -1,7 +1,7 @@ TensorFlow.NN

    tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.NN

    Documentation

    sigmoidCrossEntropyWithLogits Source

    Arguments

    :: (OneOf `[Float, Double]` a, TensorType a, Num a) 
    => Tensor Value a

    logits

    -> Tensor Value a

    targets

    -> Build (Tensor Value a) 

    Computes sigmoid cross entropy given logits.

    Measures the probability error in discrete classification tasks in which each +

    tensorflow-nn-0.1.0.0: Friendly layer around TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.NN

    Documentation

    sigmoidCrossEntropyWithLogits

    Arguments

    :: (MonadBuild m, OneOf `[Float, Double]` a, TensorType a, Num a) 
    => Tensor Value a

    logits

    -> Tensor Value a

    targets

    -> m (Tensor Value a) 

    Computes sigmoid cross entropy given logits.

    Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time.

    For brevity, let `x = logits`, `z = targets`. The logistic loss is

    z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/src/TensorFlow-NN.html b/docs/haddock/tensorflow-nn-0.1.0.0/src/TensorFlow-NN.html deleted file mode 100644 index b487eab..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/src/TensorFlow-NN.html +++ /dev/null @@ -1,98 +0,0 @@ - - - - - -src/TensorFlow/NN.hs - - - -

    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE DataKinds #-}
    -{-# LANGUAGE OverloadedStrings #-}
    -
    -module TensorFlow.NN
    -    ( sigmoidCrossEntropyWithLogits
    -    ) where
    -
    -import Prelude hiding           ( log
    -                                , exp
    -                                )
    -import TensorFlow.Build         ( Build
    -                                , render
    -                                , withNameScope
    -                                )
    -import TensorFlow.GenOps.Core   ( greaterEqual
    -                                , select
    -                                , log
    -                                , exp
    -                                )
    -import TensorFlow.Tensor        ( Tensor(..)
    -                                , Value
    -                                )
    -import TensorFlow.Types         ( TensorType(..)
    -                                , OneOf
    -                                )
    -import TensorFlow.Ops           ( zerosLike
    -                                , add
    -                                )
    -
    --- | Computes sigmoid cross entropy given `logits`.
    ---
    --- Measures the probability error in discrete classification tasks in which each
    --- class is independent and not mutually exclusive.  For instance, one could
    --- perform multilabel classification where a picture can contain both an elephant
    --- and a dog at the same time.
    ---
    --- For brevity, let `x = logits`, `z = targets`.  The logistic loss is
    ---
    ---        z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
    ---      = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
    ---      = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
    ---      = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
    ---      = (1 - z) * x + log(1 + exp(-x))
    ---      = x - x * z + log(1 + exp(-x))
    ---
    ---  For x < 0, to avoid overflow in exp(-x), we reformulate the above
    ---
    ---        x - x * z + log(1 + exp(-x))
    ---      = log(exp(x)) - x * z + log(1 + exp(-x))
    ---      = - x * z + log(1 + exp(x))
    ---
    ---  Hence, to ensure stability and avoid overflow, the implementation uses this
    ---  equivalent formulation
    ---
    ---      max(x, 0) - x * z + log(1 + exp(-abs(x)))
    ---
    ---  `logits` and `targets` must have the same type and shape.
    -sigmoidCrossEntropyWithLogits
    -  :: (OneOf '[Float, Double] a, TensorType a, Num a)
    -     => Tensor Value a          -- ^ __logits__
    -     -> Tensor Value a          -- ^ __targets__
    -     -> Build (Tensor Value a)
    -sigmoidCrossEntropyWithLogits logits targets = do
    -    logits' <- render logits
    -    targets' <- render targets
    -    let zeros = zerosLike logits'
    -        cond = logits' `greaterEqual` zeros
    -        relu_logits = select cond logits' zeros
    -        neg_abs_logits = select cond (-logits') logits'
    -    withNameScope "logistic_loss" $ do
    -        left  <- render $ relu_logits - logits' * targets'
    -        right <- render $ log (1 + exp neg_abs_logits)
    -        withNameScope "sigmoid_add" $ render $ left `add` right
    -
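With the MonadBuild-polymorphic signature above, the loss can be built and run directly in a Session. A minimal sketch, assuming constant :: Shape -> [a] -> Tensor Value a and Shape as exported by this snapshot of TensorFlow.Ops and TensorFlow.Core:

import Control.Monad.IO.Class (liftIO)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF (constant)
import TensorFlow.NN (sigmoidCrossEntropyWithLogits)

main :: IO ()
main = TF.runSession $ do
    let logits  = TF.constant (TF.Shape [3]) [1.0, -2.0, 0.5 :: Float]
        targets = TF.constant (TF.Shape [3]) [1.0, 0.0, 1.0]
    loss <- sigmoidCrossEntropyWithLogits logits targets
    result <- TF.run loss
    liftIO $ print (result :: V.Vector Float)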
    - diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-nn-0.1.0.0/src/hscolour.css deleted file mode 100644 index c15919e..0000000 --- a/docs/haddock/tensorflow-nn-0.1.0.0/src/hscolour.css +++ /dev/null @@ -1,5 +0,0 @@ -.hs-keyglyph, .hs-layout {color: red;} -.hs-keyword {color: blue;} -.hs-comment, .hs-comment a {color: green;} -.hs-str, .hs-chr {color: teal;} -.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt b/docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt index dac451d..041f9a8 100644 --- a/docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt +++ b/docs/haddock/tensorflow-nn-0.1.0.0/tensorflow-nn.txt @@ -37,4 +37,4 @@ module TensorFlow.NN -- -- logits and targets must have the same type and -- shape. -sigmoidCrossEntropyWithLogits :: (OneOf '[Float, Double] a, TensorType a, Num a) => Tensor Value a -> Tensor Value a -> Build (Tensor Value a) +sigmoidCrossEntropyWithLogits :: (MonadBuild m, OneOf '[Float, Double] a, TensorType a, Num a) => Tensor Value a -> Tensor Value a -> m (Tensor Value a) diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html index 8400006..1fd0479 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen-ParsedOp.html @@ -1,12 +1,10 @@ TensorFlow.OpGen.ParsedOp

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.OpGen.ParsedOp

    Description

    This module helps parse the proto OpDef into a Haskell type which is more +

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.OpGen.ParsedOp

    Description

    This module helps parse the proto OpDef into a Haskell type which is more descriptive of how the attributes and arguments will be used in the - generated code.

    Documentation

    data ParsedOp Source

    Constructors

    ParsedOp 

    Fields

    parsedOpName :: Name
     
    parsedOpSummary :: Text
     
    parsedOpDescription :: Text
     
    parsedInputs :: [ParsedArg]
     
    parsedOutputs :: [ParsedArg]
     
    explicitInputAttrs :: [Attr AttrType]

    Attributes that must be set explicitly when creating the op. - Associated with the type of the attribute.

    inferredTypeAttrs :: [Attr [DataType]]

    Attributes that are type parameters. - Associated with the list of allowed types (see: TensorFlow.Types.OneOf). - If this list is empty, then any type is acceptable.

    inferredListSizeAttrs :: [Attr (NonEmpty Name)]
     
    parsedOpIsMonadic :: Bool

    Whether this op is stateful or takes a stateful input. Such ops + generated code.

    Documentation

    data ParsedOp

    Constructors

    ParsedOp 

    Fields

    parsedOpName :: Name
     
    parsedOpSummary :: Text
     
    parsedOpDescription :: Text
     
    parsedInputs :: [ParsedArg]
     
    parsedOutputs :: [ParsedArg]
     
    explicitInputAttrs :: [Attr AttrType]

    Attributes that must be set explicitly when creating the op. + Associated with the type of the attribute.

    inferredTypeAttrs :: [Attr TypeParam]

    Attributes that are type parameters.

    inferredListSizeAttrs :: [Attr (NonEmpty Name)]
     
    parsedOpIsMonadic :: Bool

    Whether this op is stateful or takes a stateful input. Such ops should not be CSE'd and must be monadic in our API (i.e., return a - Build action).

    data Name Source

    Constructors

    Name 

    newtype HaskellName Source

    A name that's appropriate for a variable in a Haskell source file.

    Constructors

    HaskellName 

    Fields

    unHaskellName :: Text
     

    newtype TFName Source

    A raw name as specified in the OpDef proto.

    Constructors

    TFName 

    Fields

    unTFName :: Text
     

    data Attr a Source

    A named attribute, associated with some information about it.

    Constructors

    Attr 

    data AttrType Source

    The type of an attribute.

    Instances

    data ParsedArg Source

    An input or output argument (Tensor) for an op.

    data ParsedArgCase Source

    Constructors

    SimpleArg 

    Fields

    argType :: ArgType
     
    ListArg 

    Fields

    argLength :: Name

    The attribute that specifies this list's length.

    argType :: ArgType
     
    MixedListArg

    A heterogeneous list. - TODO(judahjacobson): Implement this.

    Fields

    argTypeAttr :: Name
     

    data ArgType Source

    The type of an argument.

    Constructors

    ArgTypeFixed DataType

    A fixed type.

    ArgTypeAttr Name

    A type that depends on an attribute.

    \ No newline at end of file + Build action).

    data Name

    Constructors

    Name 

    newtype HaskellName

    A name that's appropriate for a variable in a Haskell source file.

    Constructors

    HaskellName 

    Fields

    unHaskellName :: Text
     

    newtype TFName

    A raw name as specified in the OpDef proto.

    Constructors

    TFName 

    Fields

    unTFName :: Text
     

    Instances

    data Attr a

    A named attribute, associated with some information about it.

    Constructors

    Attr 

    Fields

    attrName :: Name
     
    attrDescription :: Text
     
    attrInfo :: a
     

    data AttrType

    The type of an attribute.

    Instances

    data TypeParam

    Constructors

    TypeParam 

    Fields

    typeParamIsList :: Bool
     
    typeParamRestrictions :: Maybe (NonEmpty DataType)

    The list of allowed types (see: TensorFlow.Types.OneOf). + If Nothing, then any type is acceptable.

    data ParsedArg

    An input or output argument (Tensor) for an op.

    data ParsedArgCase

    Constructors

    SimpleArg 
    ListArg 

    Fields

    argLength :: Name

    The attribute that specifies this list's length.

    argType :: ArgType
     
    argCaseKind :: ArgKind
     
    MixedListArg

    A heterogeneous list.

    ResourceArg 

    data ArgType

    The type of an argument.

    Constructors

    ArgTypeFixed DataType

    A fixed type.

    ArgTypeAttr Name

    A type that depends on an attribute.

    camelCase :: Text -> Text
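camelCase capitalizes each underscore-separated component of an OpDef name, for example:

camelCase "batch_mat_mul" == "BatchMatMul"
camelCase "const" == "Const"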

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html index 4ea5883..b60dd5c 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/TensorFlow-OpGen.html @@ -1,14 +1,14 @@ TensorFlow.OpGen

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.OpGen

    Description

    Rendering of TensorFlow operations as Haskell functions.

    The basic type signature generated for each op is:

    {constraints} => {mandatory attrs} -> {input tensors} -> {output tensors}

    where:

    • {mandatory attrs} is of the form A_1 -> ... -> A_N, where each A is an +

      tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

      Safe HaskellNone
      LanguageHaskell2010

      TensorFlow.OpGen

      Description

      Rendering of TensorFlow operations as Haskell functions.

      The basic type signature generated for each op is:

      {constraints} => {mandatory attrs} -> {input tensors} -> {output tensors}

      where:

      • {mandatory attrs} is of the form A_1 -> ... -> A_N, where each A is an op attribute that doesn't have a default and can't be inferred from other inputs.
      • {constraints} restrict the type parameters of the input and output tensors (for example: TensorType or OneOf).
      • {input tensors} is of the form T_1 -> ... -> T_N, where each T is of -the form Tensor Ref a, Tensor v a or ResourceHandle a (or a list of one +the form Tensor Ref a, Tensor v a or ResourceHandle (or a list of one of those types), and a is either a concrete type or a (constrained) type variable.
      • {output tensors} is of the form (T_1,...,T_N) for "pure" ops, and Build (T_1,...,T_N) for "stateful" ops. An op is considered "stateful" if it takes a Tensor Ref or ResourceHandle as input, or if it's explicitly marked "Stateful" in its REGISTER_OP definition. (If there are no outputs, -it is either ControlNode or Build ControlNode.)
      \ No newline at end of file +it is either ControlNode or Build ControlNode.)
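For instance, a pure one-input op renders along these lines (a hypothetical op name, with the OneOf list abbreviated):

-- {constraints}, then {input tensors} -> {output tensors}; pure, so no Build.
someOp :: (TensorType t, OneOf '[Float, Double] t)
       => Tensor v t -> Tensor Value t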

    Documentation

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html index cc0ecfc..d7362a3 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations. (Index)

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    Index

    ArgKindTensorFlow.OpGen.ParsedOp
    argLengthTensorFlow.OpGen.ParsedOp
    ArgResourceTensorFlow.OpGen.ParsedOp
    ArgTensorEitherTensorFlow.OpGen.ParsedOp
    ArgTensorRefTensorFlow.OpGen.ParsedOp
    ArgTensorValueTensorFlow.OpGen.ParsedOp
    ArgTypeTensorFlow.OpGen.ParsedOp
    argTypeTensorFlow.OpGen.ParsedOp
    ArgTypeAttrTensorFlow.OpGen.ParsedOp
    argTypeAttrTensorFlow.OpGen.ParsedOp
    ArgTypeFixedTensorFlow.OpGen.ParsedOp
    Attr 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    AttrBaseTypeTensorFlow.OpGen.ParsedOp
    AttrBoolTensorFlow.OpGen.ParsedOp
    AttrBytesTensorFlow.OpGen.ParsedOp
    attrDescriptionTensorFlow.OpGen.ParsedOp
    AttrFloatTensorFlow.OpGen.ParsedOp
    attrInfoTensorFlow.OpGen.ParsedOp
    AttrInt64TensorFlow.OpGen.ParsedOp
    AttrListTensorFlow.OpGen.ParsedOp
    attrNameTensorFlow.OpGen.ParsedOp
    AttrShapeTensorFlow.OpGen.ParsedOp
    AttrSingleTensorFlow.OpGen.ParsedOp
    AttrTensorTensorFlow.OpGen.ParsedOp
    AttrType 
    1 (Data Constructor)TensorFlow.OpGen.ParsedOp
    2 (Type/Class)TensorFlow.OpGen.ParsedOp
    camelCaseTensorFlow.OpGen.ParsedOp
    docOpListTensorFlow.OpGen
    excludeListTensorFlow.OpGen
    explicitInputAttrsTensorFlow.OpGen.ParsedOp
    flagParserTensorFlow.OpGen
    HaskellName 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    haskellNameTensorFlow.OpGen.ParsedOp
    inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
    inferredTypeAttrsTensorFlow.OpGen.ParsedOp
    ListArgTensorFlow.OpGen.ParsedOp
    MixedListArgTensorFlow.OpGen.ParsedOp
    Name 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    OpGenFlags 
    1 (Type/Class)TensorFlow.OpGen
    2 (Data Constructor)TensorFlow.OpGen
    outputFileTensorFlow.OpGen
    ParsedArg 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    ParsedArgCaseTensorFlow.OpGen.ParsedOp
    parsedArgCaseTensorFlow.OpGen.ParsedOp
    parsedArgDescriptionTensorFlow.OpGen.ParsedOp
    parsedArgKindTensorFlow.OpGen.ParsedOp
    parsedArgNameTensorFlow.OpGen.ParsedOp
    parsedInputsTensorFlow.OpGen.ParsedOp
    ParsedOp 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    parsedOpDescriptionTensorFlow.OpGen.ParsedOp
    parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
    parsedOpNameTensorFlow.OpGen.ParsedOp
    parsedOpSummaryTensorFlow.OpGen.ParsedOp
    parsedOutputsTensorFlow.OpGen.ParsedOp
    parseOpTensorFlow.OpGen.ParsedOp
    prefixTensorFlow.OpGen
    SimpleArgTensorFlow.OpGen.ParsedOp
    TFName 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    tfNameTensorFlow.OpGen.ParsedOp
    unHaskellNameTensorFlow.OpGen.ParsedOp
    unTFNameTensorFlow.OpGen.ParsedOp
    \ No newline at end of file +

    tensorflow-opgen-0.1.0.0: Code generation for TensorFlow operations.

    Index

    argCaseKindTensorFlow.OpGen.ParsedOp
    ArgKindTensorFlow.OpGen.ParsedOp
    argKindTensorFlow.OpGen.ParsedOp
    argLengthTensorFlow.OpGen.ParsedOp
    ArgSomeTensorTensorFlow.OpGen.ParsedOp
    ArgTensorBuildTensorFlow.OpGen.ParsedOp
    ArgTensorRefTensorFlow.OpGen.ParsedOp
    ArgTensorValueTensorFlow.OpGen.ParsedOp
    ArgTypeTensorFlow.OpGen.ParsedOp
    argTypeTensorFlow.OpGen.ParsedOp
    ArgTypeAttrTensorFlow.OpGen.ParsedOp
    argTypeAttrTensorFlow.OpGen.ParsedOp
    ArgTypeFixedTensorFlow.OpGen.ParsedOp
    Attr 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    AttrBaseTypeTensorFlow.OpGen.ParsedOp
    AttrBoolTensorFlow.OpGen.ParsedOp
    AttrBytesTensorFlow.OpGen.ParsedOp
    attrDescriptionTensorFlow.OpGen.ParsedOp
    AttrFloatTensorFlow.OpGen.ParsedOp
    attrInfoTensorFlow.OpGen.ParsedOp
    AttrInt64TensorFlow.OpGen.ParsedOp
    AttrListTensorFlow.OpGen.ParsedOp
    attrNameTensorFlow.OpGen.ParsedOp
    AttrShapeTensorFlow.OpGen.ParsedOp
    AttrSingleTensorFlow.OpGen.ParsedOp
    AttrTensorTensorFlow.OpGen.ParsedOp
    AttrType 
    1 (Data Constructor)TensorFlow.OpGen.ParsedOp
    2 (Type/Class)TensorFlow.OpGen.ParsedOp
    camelCaseTensorFlow.OpGen.ParsedOp
    docOpListTensorFlow.OpGen
    excludeListTensorFlow.OpGen
    explicitInputAttrsTensorFlow.OpGen.ParsedOp
    flagParserTensorFlow.OpGen
    HaskellName 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    haskellNameTensorFlow.OpGen.ParsedOp
    inferredListSizeAttrsTensorFlow.OpGen.ParsedOp
    inferredTypeAttrsTensorFlow.OpGen.ParsedOp
    ListArgTensorFlow.OpGen.ParsedOp
    MixedListArgTensorFlow.OpGen.ParsedOp
    Name 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    OpGenFlags 
    1 (Type/Class)TensorFlow.OpGen
    2 (Data Constructor)TensorFlow.OpGen
    outputFileTensorFlow.OpGen
    ParsedArg 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    ParsedArgCaseTensorFlow.OpGen.ParsedOp
    parsedArgCaseTensorFlow.OpGen.ParsedOp
    parsedArgDescriptionTensorFlow.OpGen.ParsedOp
    parsedArgNameTensorFlow.OpGen.ParsedOp
    parsedInputsTensorFlow.OpGen.ParsedOp
    ParsedOp 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    parsedOpDescriptionTensorFlow.OpGen.ParsedOp
    parsedOpIsMonadicTensorFlow.OpGen.ParsedOp
    parsedOpNameTensorFlow.OpGen.ParsedOp
    parsedOpSummaryTensorFlow.OpGen.ParsedOp
    parsedOutputsTensorFlow.OpGen.ParsedOp
    parseOpTensorFlow.OpGen.ParsedOp
    prefixTensorFlow.OpGen
    ResourceArgTensorFlow.OpGen.ParsedOp
    SimpleArgTensorFlow.OpGen.ParsedOp
    TFName 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    tfNameTensorFlow.OpGen.ParsedOp
    TypeParam 
    1 (Type/Class)TensorFlow.OpGen.ParsedOp
    2 (Data Constructor)TensorFlow.OpGen.ParsedOp
    typeParamIsListTensorFlow.OpGen.ParsedOp
    typeParamRestrictionsTensorFlow.OpGen.ParsedOp
    unHaskellNameTensorFlow.OpGen.ParsedOp
    unTFNameTensorFlow.OpGen.ParsedOp
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html index f05dfc2..b7407eb 100644 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html +++ b/docs/haddock/tensorflow-opgen-0.1.0.0/mini_TensorFlow-OpGen-ParsedOp.html @@ -1,4 +1,4 @@ TensorFlow.OpGen.ParsedOp

    TensorFlow.OpGen.ParsedOp

    \ No newline at end of file +

    TensorFlow.OpGen.ParsedOp

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen-ParsedOp.html b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen-ParsedOp.html deleted file mode 100644 index df12422..0000000 --- a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen-ParsedOp.html +++ /dev/null @@ -1,324 +0,0 @@ - - - - - -src/TensorFlow/OpGen/ParsedOp.hs - - - -
    -- | This module helps parse the proto OpDef into a Haskell type which is more
    --- descriptive of how the attributes and arguments will be used in the
    --- generated code.
    -{-# LANGUAGE LambdaCase #-}
    -{-# LANGUAGE OverloadedStrings #-}
    -{-# LANGUAGE RecordWildCards #-}
    -module TensorFlow.OpGen.ParsedOp
    -    ( ParsedOp(..)
    -    , Name(..)
    -    , HaskellName(..)
    -    , TFName(..)
    -    , Attr(..)
    -    , AttrType(..)
    -    , AttrBaseType(..)
    -    , ParsedArg(..)
    -    , ParsedArgCase(..)
    -    , ArgType(..)
    -    , ArgKind(..)
    -    , parseOp
    -    , camelCase
    -    ) where
    -
    -import Data.Char (toUpper, toLower)
    -import Data.List (sortBy)
    -import Data.List.NonEmpty (NonEmpty, nonEmpty)
    -import Data.Maybe (mapMaybe)
    -import Data.Monoid ((<>))
    -import Data.Ord (comparing)
    -import qualified Data.Set as Set
    -import Data.Text (Text)
    -import qualified Data.Text as Text
    -import Lens.Family2 ((^.))
    -import Proto.Tensorflow.Core.Framework.AttrValue (list)
    -import Proto.Tensorflow.Core.Framework.OpDef
    -    ( OpDef
    -    , OpDef'ArgDef
    -    , OpDef'AttrDef
    -    , allowedValues
    -    , attr
    -    , maybe'defaultValue
    -    , description
    -    , name
    -    , inputArg
    -    , isRef
    -    , isStateful
    -    , outputArg
    -    , summary
    -    , typeListAttr
    -    , numberAttr
    -    , typeAttr
    -    , type'
    -    )
    -import Proto.Tensorflow.Core.Framework.Types (DataType(DT_RESOURCE))
    -
    -data ParsedOp = ParsedOp
    -    { parsedOpName :: Name
    -    , parsedOpSummary :: Text
    -    , parsedOpDescription :: Text
    -    , parsedInputs :: [ParsedArg]
    -    , parsedOutputs :: [ParsedArg]
    -    , explicitInputAttrs :: [Attr AttrType]
    -        -- ^ Attributes that must be set explicitly when creating the op.
    -        -- Associated with the type of the attribute.
    -    , inferredTypeAttrs :: [Attr [DataType]]
    -        -- ^ Attributes that are type parameters.
    -        -- Associated with the list of allowed types (see: TensorFlow.Types.OneOf).
    -        -- If this list is empty, then any type is acceptable.
    -    , inferredListSizeAttrs :: [Attr (NonEmpty Name)]
    -        -- Attributes which are list sizes (ints) that are inferred automatically
    -        -- from one or more of the input tensors.
    -        -- Associated with the list of tensors whose size it describes.
    -    , parsedOpIsMonadic :: Bool
    -        -- ^ Whether this op is stateful or takes a stateful input.  Such ops
    -        -- should not be CSE'd and must be monadic in our API (i.e., return a
    -        -- Build action).
    -    }
    -
    -data Name = Name
    -    { haskellName :: HaskellName
    -    , tfName :: TFName
    -    }
    -
    --- | A raw name as specified in the OpDef proto.
    -newtype TFName = TFName { unTFName :: Text }
    -    deriving (Eq, Ord)
    -
    --- | A name that's appropriate for a variable in a Haskell source file.
    -newtype HaskellName = HaskellName { unHaskellName :: Text }
    -
    --- | A named attribute, associated with some information about it.
    -data Attr a = Attr
    -    { attrName :: Name
    -    , attrDescription :: Text
    -    , attrInfo :: a
    -    }
    -
    --- | The type of an attribute.
    -data AttrType = AttrSingle AttrBaseType
    -                | AttrList AttrBaseType
    -                deriving Eq
    -
    -data AttrBaseType = AttrBytes | AttrInt64 | AttrFloat | AttrBool
    -                | AttrType | AttrShape | AttrTensor
    -                deriving Eq
    -
    --- | An input or output argument (Tensor) for an op.
    -data ParsedArg = ParsedArg
    -    { parsedArgName :: Name
    -    , parsedArgDescription :: Text
    -    , parsedArgCase :: ParsedArgCase
    -    , parsedArgKind :: ArgKind
    -    }
    -
    -data ParsedArgCase
    -    = SimpleArg { argType :: ArgType }
    -    | ListArg
    -        { argLength :: Name  -- ^ The attribute that specifies this list's length.
    -        , argType :: ArgType
    -        }
    -    | MixedListArg { argTypeAttr :: Name }
    -        -- ^ A heterogeneous list.
    -        -- TODO(judahjacobson): Implement this.
    -
    --- | The type of an argument.
    -data ArgType
    -    = ArgTypeFixed DataType -- ^ A fixed type.
    -    | ArgTypeAttr Name  -- ^ A type that depends on an attribute.
    -
    --- The kind of an op input or output (not including the argument type `a`).
    -data ArgKind
    -    = ArgTensorRef -- Tensor Ref a
    -    | ArgTensorValue -- Tensor Value a
    -    | ArgTensorEither Text -- Tensor v a; the Text is the variable `v`
    -    | ArgResource -- Resource a
    -
    -isRefKind :: ArgKind -> Bool
    -isRefKind ArgTensorRef = True
    -isRefKind ArgResource = True
    -isRefKind _ = False
    -
    -makeName :: Text -> Name
    -makeName n = Name
    -    { haskellName = HaskellName $ fixReservedName $ lowCase n
    -    , tfName = TFName n
    -    }
    -
    --- | Change a name so it doesn't conflict with any Haskell keywords.
    -fixReservedName :: Text -> Text
    -fixReservedName n
    -    | n `Set.member` reservedKeywords = n <> "'"
    -    | otherwise = n
    -
    -reservedKeywords :: Set.Set Text
    -reservedKeywords = Set.fromList $
    -    -- Haskell2010 keywords:
    -    -- https://www.haskell.org/onlinereport/haskell2010/haskellch2.html#x7-180002.4
    -    -- We don't include keywords that are allowed to be variable names,
    -    -- in particular: "as", "forall", and "hiding".
    -    [ "case"
    -    , "class"
    -    , "data"
    -    , "default"
    -    , "deriving"
    -    , "do"
    -    , "else"
    -    , "foreign"
    -    , "if"
    -    , "import"
    -    , "in"
    -    , "infix"
    -    , "infixl"
    -    , "infixr"
    -    , "instance"
    -    , "let"
    -    , "module"
    -    , "newtype"
    -    , "of"
    -    , "then"
    -    , "type"
    -    , "where"
    -    ]
    -    ++  -- Nonstandard extensions
    -    [ "mdo"   -- RecursiveDo
    -    , "rec"   -- Arrows, RecursiveDo
    -    , "proc"  -- Arrows
    -    ]
    -
    --- | Lower-case the given text.
    -lowCase :: Text -> Text
    -lowCase = forceCase toLower
    -
    -forceCase :: (Char -> Char) -> Text -> Text
    -forceCase convert s = maybe "" (\(c, cs) -> Text.cons (convert c) cs)
    -                      (Text.uncons s)
    -
    -camelCase :: Text -> Text
    -camelCase s = Text.concat $ map upCase
    -                          $ Text.splitOn "_" s
    -
    --- | Upper-case the given text.
    -upCase :: Text -> Text
    -upCase = forceCase toUpper
    -
    -
    -parseOp :: OpDef -> ParsedOp
    -parseOp o = ParsedOp
    -    { parsedOpName = makeName $ o ^. name
    -    , parsedOpSummary = o ^. summary
    -    , parsedOpDescription = o ^. description
    -    , parsedOpIsMonadic = o ^. isStateful
    -                            || any (isRefKind . parsedArgKind) parsedInputs
    -    , ..
    -    }
    -  where
    -    parsedInputs = zipWith (\a v -> parseArg a (inputTensorKind a v))
    -                (o ^. inputArg) tensorKindParams
    -    tensorKindParams = ["v" <> Text.pack (show x) | x <- [1::Integer ..]]
    -    parsedOutputs = map (\a -> parseArg a (outputTensorKind a)) (o ^. outputArg)
    -    explicitInputAttrs = sortBy (comparing (tfName . attrName))
    -                        $ mapMaybeAttrs (getExplicitInputAttr implicitAttrs)
    -                        $ o ^. attr
    -    inferredTypeAttrs = mapMaybeAttrs getInferredTypeAttr $ o ^. attr
    -    inferredListSizeAttrs = mapMaybeAttrs (getInferredListSizeAttr parsedInputs)
    -                                $ o ^. attr
    -    implicitAttrs = Set.fromList $ map tfName $
    -                        map attrName inferredTypeAttrs
    -                            ++ map attrName inferredListSizeAttrs
    -
    --- TODO(judahjacobson): Some arguments should be refs.
    -inputTensorKind :: OpDef'ArgDef -> Text -> ArgKind
    -inputTensorKind a v
    -    | a ^. type' == DT_RESOURCE = ArgResource
    -    | a ^. isRef = ArgTensorRef
    -    | otherwise = ArgTensorEither v
    -
    -outputTensorKind :: OpDef'ArgDef -> ArgKind
    -outputTensorKind a
    -    | a ^. type' == DT_RESOURCE = ArgResource
    -    | a ^. isRef = ArgTensorRef
    -    | otherwise = ArgTensorValue
    -
    -getExplicitInputAttr :: Set.Set TFName -> OpDef'AttrDef -> Maybe AttrType
    -getExplicitInputAttr implicitAttrs a
    -    | TFName (a ^. name) `Set.notMember` implicitAttrs
    -    , a ^. maybe'defaultValue == Nothing
    -    , t <- parseAttrType (a ^. type')
    -    , t `elem` map AttrSingle [AttrBool, AttrInt64, AttrFloat, AttrShape] = Just t
    -    | otherwise = Nothing
    -
    -getInferredTypeAttr :: OpDef'AttrDef -> Maybe [DataType]
    -getInferredTypeAttr a
    -    | a ^. type' == "type" = Just $ a ^. allowedValues . list . type'
    -    | otherwise = Nothing
    -
    -getInferredListSizeAttr :: [ParsedArg] -> OpDef'AttrDef -> Maybe (NonEmpty Name)
    -getInferredListSizeAttr inputs a
    -    | a ^. type' == "int"
    -        = nonEmpty [t | ParsedArg { parsedArgName = t
    -                                  , parsedArgCase
    -                                        = ListArg { argLength = n }
    -                                  } <- inputs
    -                      , TFName (a ^. name) == tfName n]
    -    | otherwise = Nothing
    -    
    --- | Like mapMaybe, but associates the attribute name/description with the given info.
    -mapMaybeAttrs :: (OpDef'AttrDef -> Maybe a) -> [OpDef'AttrDef] -> [Attr a]
    -mapMaybeAttrs f = mapMaybe $ \a -> do
    -                            x <- f a
    -                            Just Attr
    -                                { attrName = makeName (a ^. name)
    -                                , attrDescription = a ^. description
    -                                , attrInfo = x
    -                                }
    -  
    -parseArg :: OpDef'ArgDef -> ArgKind -> ParsedArg
    -parseArg a tKind = ParsedArg
    -    { parsedArgName = makeName (a ^. name)
    -    , parsedArgDescription = a ^. description
    -    , parsedArgCase = parseArgCase a
    -    , parsedArgKind = tKind
    -    }
    -
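--- | Classify an argument: a heterogeneous list (driven by a type-list
--- attribute), a homogeneous list (sized by a number attribute), or a single
--- tensor.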
    -parseArgCase :: OpDef'ArgDef -> ParsedArgCase
    -parseArgCase a
    -    | Just n <- maybeAttr (a ^. typeListAttr) = MixedListArg n
    -    | Just n <- maybeAttr (a ^. numberAttr) = ListArg n thisArgType
    -    | otherwise = SimpleArg thisArgType
    -  where
    -    thisArgType
    -        | Just n <- maybeAttr (a ^. typeAttr) = ArgTypeAttr n
    -        | a ^. type' == DT_RESOURCE = ArgTypeAttr (makeName "dtype")
    -        | otherwise = ArgTypeFixed (a ^. type')
    -    maybeAttr :: Text -> Maybe Name
    -    maybeAttr "" = Nothing
    -    maybeAttr t = Just $ makeName t
    -
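--- | Map a TensorFlow attribute type string to its Haskell counterpart;
--- for example, "list(int)" becomes @AttrList AttrInt64@.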
    -parseAttrType :: Text -> AttrType
    -parseAttrType = \case
    -    "string" -> AttrSingle AttrBytes
    -    "int" -> AttrSingle AttrInt64 
    -    "float" -> AttrSingle AttrFloat 
    -    "bool" -> AttrSingle AttrBool 
    -    "type" -> AttrSingle AttrType 
    -    "shape" -> AttrSingle AttrShape 
    -    "tensor" -> AttrSingle AttrTensor 
    -    "list(string)" -> AttrList AttrBytes
    -    "list(int)" -> AttrList AttrInt64
    -    "list(float)" -> AttrList AttrFloat
    -    "list(bool)" -> AttrList AttrBool
    -    "list(type)" -> AttrList AttrType
    -    "list(shape)" -> AttrList AttrShape
    -    "list(tensor)" -> AttrList AttrTensor
    -    t -> error $ "parseAttrType: unrecognized type " ++ show t
    -
-
diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen.html b/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen.html
deleted file mode 100644
index 66622b8..0000000
--- a/docs/haddock/tensorflow-opgen-0.1.0.0/src/TensorFlow-OpGen.html
+++ /dev/null
@@ -1,428 +0,0 @@
-src/TensorFlow/OpGen.hs
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE FlexibleContexts #-}
    -{-# LANGUAGE LambdaCase #-}
    -{-# LANGUAGE OverloadedStrings #-}
    -{-# LANGUAGE TypeFamilies #-}
    -{- | Rendering of TensorFlow operations as Haskell functions.
    -
    -The basic type signature generated for each op is:
    -
    -> {constraints} => {mandatory attrs} -> {input tensors} -> {output tensors}
    -
    -where:
    -
    -* @{mandatory attrs}@ is of the form @A_1 -> ... -> A_N@, where each @A@ is an
    - op attribute that doesn't have a default and can't be inferred from other
    - inputs.
    -
    -* @{constraints}@ restrict the type parameters of the input and output tensors
    - (for example: 'TensorType' or 'OneOf').
    -
    -* @{input tensors}@ is of the form @T_1 -> ... -> T_N@, where each @T@ is of
    -the form @Tensor Ref a@, @Tensor v a@ or @ResourceHandle a@ (or a list of one
    -of those types), and @a@ is either a concrete type or a (constrained) type
    -variable.
    -
    -* @{output tensors}@ is of the form @(T_1,...,T_N)@ for "pure" ops, and
    -@Build (T_1,...,T_N)@ for "stateful" ops.  An op is considered "stateful" if
    -it takes a @Tensor Ref@ or @ResourceHandle@ as input, or if it's explicitly
    -marked \"Stateful\" in its @REGISTER_OP@ definition.  (If there are no outputs,
    -it is either @ControlNode@ or @Build ControlNode@.)
    --}
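-
--- An illustrative sketch of this pattern (not verbatim generator output): a
--- pure op with one inferred type parameter and no mandatory attributes would
--- render along the lines of
---
--- > foo :: TensorType t => Tensor v1 t -> Tensor v2 t -> Tensor Value t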
    -
    -module TensorFlow.OpGen
    -  ( OpGenFlags(..)
    -  , docOpList
    -  , flagParser)
    -  where
    -
    -import Data.Foldable (toList)
    -import Data.Maybe (fromMaybe)
    -import Data.ProtoLens (def, showMessage)
    -import Data.List.NonEmpty (NonEmpty)
    -import qualified Data.List.NonEmpty as NE
    -import Lens.Family2 ((^.), (.~), (&), view)
    -import Options.Applicative (Parser, help, long, strOption, value)
    -import Proto.Tensorflow.Core.Framework.OpDef
    -  ( OpList
    -  , OpDef
    -  , attr
    -  , inputArg
    -  , name
    -  , op
    -  , outputArg
    -  )
    -import Proto.Tensorflow.Core.Framework.Types (DataType(..))
    -import System.FilePath (takeBaseName)
    -import TensorFlow.OpGen.ParsedOp
    -import Text.PrettyPrint.Mainland
    -  ( Doc
    -  , (<>)
    -  , (<+>)
    -  , (</>)
    -  , (<+/>)
    -  , brackets
    -  , comma
    -  , commasep
    -  , dquotes
    -  , empty
    -  , enclose
    -  , flatten
    -  , folddoc
    -  , hang
    -  , indent
    -  , parens
    -  , sep
    -  , stack
    -  , strictText
    -  , tuple
    -  )
    -import qualified Data.Set as Set
    -import qualified Data.Text as Text
    -
    -data OpGenFlags = OpGenFlags
    -     { outputFile :: String
    -     , prefix :: String
    -     , excludeList :: String
    -     }
    -
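--- | Parse the generator's command-line flags. A hypothetical invocation
--- (binary name illustrative):
---
--- > opgen --output Core.hs --prefix TensorFlow.GenOps --exclude_list Foo,Bar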
    -flagParser :: Parser OpGenFlags
    -flagParser = OpGenFlags
-     <$> strOption (mconcat [ long "output"
-                            , help "File to write."
-                            ])
-     <*> strOption (mconcat [ long "prefix"
-                            , help "Haskell package prefix to use."
-                            ])
-     <*> strOption (mconcat [ long "exclude_list"
-                            , value ""
-                            , help "Comma-separated list of op names to ignore."
-                            ])
    -
    -
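--- | Render a whole OpList as the text of a Haskell module. For example, with
--- the hypothetical flags @--output Core.hs@ and @--prefix TensorFlow.GenOps@,
--- the module header is @module TensorFlow.GenOps.Core where@.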
    -docOpList :: OpGenFlags -> OpList -> Doc
    -docOpList flags opList =
    -  stack [ "{-# LANGUAGE ConstraintKinds #-}"
    -        , "{-# LANGUAGE DataKinds #-}"
    -        , "{-# LANGUAGE FlexibleInstances #-}"
    -        , "{-# LANGUAGE OverloadedStrings #-}"
    -        , "{-# LANGUAGE ScopedTypeVariables #-}"
    -          -- Avoids reports about shadowing standard library names.
    -        , "{-# OPTIONS_GHC -fno-warn-name-shadowing #-}"
    -          -- eqLengthGuard never returns false and dies instead.
    -        , "{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}"
    -        , "module" <+> strictText moduleName <+> "where"
    -        , empty
    -        , imports
    -        , empty
    -        , folddoc (\x y -> x </> empty </> y)
    -                  (map renderOpAndExtras $
    -                   filter (not . flip elem exclusions . view name) $
    -                   toList $ opList ^. op)
    -        ]
    -  where moduleName =
    -            Text.pack (prefix flags) <> "." <> camelCase
    -             -- Discards the optional trailing _ops_op_lib
    -            (fromMaybe shortName (Text.stripSuffix "_ops_op_lib" shortName))
    -        shortName = Text.pack (takeBaseName $ outputFile flags)
    -        exclusions = Text.splitOn "," $ Text.pack $ excludeList flags
    -        renderOpAndExtras o = renderOp (parseOp o) </> extras o
    -
    -imports :: Doc
    -imports = stack [
    -      "import Data.ByteString (ByteString)"
    -    , "import Data.Complex (Complex)"
    -    , "import Data.Int (Int8, Int16, Int32, Int64)"
    -    , "import Data.Word (Word8, Word16)"
    -    , "import Lens.Family2 ((.~), (&))"
    -    , "import TensorFlow.Build"
    -    , "import TensorFlow.BuildOp"
    -    , "import TensorFlow.Output (ResourceHandle)"
    -    , "import TensorFlow.Tensor"
    -    , "import TensorFlow.Types"
    -    ]
    -
    -renderHaskellName, renderTFName, renderQuotedTFName :: Name -> Doc
    -renderHaskellName = strictText . unHaskellName . haskellName
    -renderTFName = strictText . unTFName . tfName
    -renderQuotedTFName = dquotes . renderTFName
    -
    -
    --- | Generate the source code for a single op.
    --- For example:
    ---
    --- -- | {haddock comment}
    --- foo :: {type sig}
    --- foo attr1 attr2 input1 input2 | eqLengthGuard [...] = {function body}
    -renderOp :: ParsedOp -> Doc
    -renderOp pOp = stack $
    -    [ haddocks
    -    , n <+> "::" <+> hang 0 (typeSig pOp)
    -    , n <+> hang 0 args <+> "|" <+> funcGuard listSizeAttrs
    -                <+> "=" </>  -- args are indented
    -                    -- the body needs to be indented wrt the name
    -                    indent indentation (functionBody pOp)
    -    ] ++ whereClause listSizeAttrs
    -  where
    -    n = renderHaskellName $ parsedOpName pOp
    -    listSizeAttrs = inferredListSizeAttrs pOp
    -    args = sep $ map renderHaskellName
    -               $ map attrName (explicitInputAttrs pOp)
    -                ++ map parsedArgName (parsedInputs pOp)
    -    haddocks = "-- |" <+> multilineComment (parsedOpSummary pOp) (parsedOpDescription pOp)
    -
--- | A check that all list inputs constrained by the same length attribute
--- actually have equal lengths.
    --- For example:
    ---   eqLengthGuard [("N", [("input1", length input1), ("input2", length input2)])]
    -funcGuard :: [Attr (NonEmpty Name)] -> Doc
    -funcGuard attrs = "eqLengthGuard" <+> brackets (commasep entries)
    -      where
    -        entries =
    -            [ parens $ nAttr <> comma <+>
    -              brackets (commasep $ toList $
    -                            map renderTensorName (toList $ attrInfo a))
    -            | a <- attrs
    -            , let nAttr = renderQuotedTFName (attrName a)
    -            ]
    -        renderTensorName x = parens $ renderQuotedTFName x <> comma <+>
    -                        "length" <+> renderHaskellName x
    -
    --- | Define the implicit list length attributes.
    --- For example:
    ---   where
    ---     n1 = fromIntegral (length input1) :: Int64
    ---     n2 = fromIntegral (length input2) :: Int64
    -whereClause :: [Attr (NonEmpty Name)] -> [Doc]
    -whereClause [] = []
    -whereClause as = [indent 2 $ "where" </> indent 2 (stack $ map defineLengthAttr as)]
    -  where
    -    defineLengthAttr a = renderHaskellName (attrName a) <+> "="
    -                            <+> "fromIntegral (length"
    -                            <+> renderHaskellName (NE.head $ attrInfo a)
    -                            <> ") :: Int64"
    -
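--- | The right-hand side of a generated op: buildOp (or buildListOp, when
--- some output lists get their sizes from attributes) applied to the
--- configured opDef and then to the input tensors.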
    -functionBody :: ParsedOp -> Doc
    -functionBody pOp = buildFunction <+> parens (hang 0 (stack buildOpParts))
    -                        </> indent indentation (sep tensorArgs)
    -  where
    -    buildFunction
    -        | null outputListsSizes = "buildOp"
    -        | otherwise = "buildListOp" <+>
    -                        brackets (commasep $
    -                                    map renderHaskellName outputListsSizes)
    -    outputListsSizes = [ a
    -                       | ParsedArg { parsedArgCase = ListArg { argLength = a } }
    -                            <- parsedOutputs pOp]
    -    buildOpParts =
    -        "opDef" <+> renderQuotedTFName (parsedOpName pOp) :
    -        -- Renders tensor arguments.
    -        [ "& opAttr" <+> renderQuotedTFName n <+>
    -          ".~ tensorType (undefined ::" <+> renderHaskellName n <> ")"
    -        | a <- inferredTypeAttrs pOp, let n = attrName a
    -        ] ++
    -        -- Renders mandatory attributes as function parameters.
    -        [ "& opAttr" <+> renderQuotedTFName n <+> ".~" <+> renderHaskellName n
    -        | a <- explicitInputAttrs pOp, let n = attrName a
    -        ] ++
    -        -- Renders sizes of tensor list types having number_attr.
    -        [ "& opAttr" <+> renderQuotedTFName n <+> ".~" <+> renderHaskellName n
    -        | a <- inferredListSizeAttrs pOp, let n = attrName a
    -        ]
    -
    -    tensorArgs = renderHaskellName . parsedArgName <$> parsedInputs pOp
    -
    --- | Write a comment with the inputs/outputs/attributes in proto format, for
    --- debugging.
    -extras :: OpDef -> Doc
    -extras d = enclose "{-\n" "\n-}" $
    -            strictText $ Text.pack $
    -            showMessage ((def :: OpDef)
    -                        & inputArg .~ (d ^. inputArg)
    -                        & outputArg .~ (d ^. outputArg)
    -                        & attr .~ (d ^. attr))
    -
    --- | The type signature for an op.
    --- Of the form:
    --- forall t1 t2 v1 v2 . (TensorType t1, TensorType t2)
---      => Float -> Tensor v1 t1 -> Tensor v2 t2
--- where "Float" is an explicit input attribute, "Tensor v1 t1" is an input, and
--- "Tensor v2 t2" is an output.
    -typeSig :: ParsedOp -> Doc
    -typeSig pOp = constraints
    -            <+/> signatureFold (map attrInput (explicitInputAttrs pOp)
    -                                ++ map tensorArgAndComment (parsedInputs pOp)
    -                                ++ [outputs])
    -  where
    -    constraints
    -        | null (inferredTypeAttrs pOp) = empty
    -        | otherwise = "forall" <+> sep typeParams <+> "." <+> classConstraints <+> "=>"
    -    typeParams = [strictText v | k <- parsedInputs pOp ++ parsedOutputs pOp,
    -                  ArgTensorEither v <- [parsedArgKind k]]
    -                ++ [renderHaskellName $ attrName n | n <- inferredTypeAttrs pOp]
    -    classConstraints = tuple $ concatMap tensorArgConstraint
    -                    $ inferredTypeAttrs pOp
    -    signatureFold = folddoc (\x y -> x </> "->" <+> y)
    -    attrInput a = renderAttrType (attrInfo a) <+> hang 0 ("-- ^" <+> attrComment a)
    -    renderAttrType (AttrSingle a) = renderAttrBaseType a
    -    renderAttrType (AttrList a) = brackets $ renderAttrBaseType a
    -    renderAttrBaseType = \case
    -        AttrBytes -> "ByteString"
    -        AttrInt64 -> "Data.Int.Int64"
    -        AttrFloat -> "Float"
    -        AttrBool -> "Bool"
    -        AttrType -> "DataType"
    -        AttrShape -> "Shape"
    -        AttrTensor -> "TensorProto"
    -
    -    tensorArgAndComment t = tensorArg t <+> hang 0 ("-- ^" <+> argComment t)
    -    outputs = case parsedOutputs pOp of
    -        [] -> wrapOutput "ControlNode"
    -        -- TODO(judahjacobson): To improve indentation: `tensorArgAndComment a`
    -        [a] -> wrapOutput (tensorArg a) <+> "-- ^" <+> argComment a
    -        as -> wrapOutput (tuple (map tensorArg as)) <+/> resultComment as
    -    wrapOutput o
    -        | parsedOpIsMonadic pOp = "Build" <+> parens o
    -        | otherwise = o
-
    --- | Render an op input or output.
    --- For example: "Tensor Ref Int64", "Tensor v t", "ResourceHandle dtype"
    -tensorArg :: ParsedArg -> Doc
    -tensorArg p = case parsedArgCase p of
    -    SimpleArg { argType = t } -> tensorType t
    -    ListArg { argType = t } -> brackets $ tensorType t
    -    MixedListArg {} -> "{{{tensorArg: can't handle heterogeneous lists}}}"
    -  where
    -    tensorType t = let
    -        v = case parsedArgKind p of
    -                ArgTensorRef -> "Tensor Ref"
    -                ArgTensorValue -> "Tensor Value"
    -                ArgTensorEither v' -> "Tensor" <+> strictText v'
    -                ArgResource -> "ResourceHandle"
    -        a = case t of
    -                ArgTypeFixed dt -> strictText $ dtTypeToHaskell dt
    -                ArgTypeAttr n -> renderHaskellName n
    -        in v <+> a
    -
    -attrComment :: Attr a -> Doc
    -attrComment a = argComment' (attrName a) (attrDescription a)
-
    -argComment :: ParsedArg -> Doc
    -argComment a = argComment' (parsedArgName a) (parsedArgDescription a)
    -
    -argComment' :: Name -> Text.Text -> Doc
    -argComment' argName argDesc =
    -    bold (renderTFName argName) <> splitMultilineText (":" <+>) argDesc
    -
    -bold :: Doc -> Doc
    -bold n = "__" <> n <> "__"
    -
    --- | Comment for the outputs of an op.
    --- For example:
    ---   -- ^ (__output1__, __output2__)
    ---   -- 
    ---   -- * __output1__: description1
    ---   --
    ---   -- * __output2__: description2
    -resultComment :: [ParsedArg] -> Doc
    -resultComment os = stack $ flatten commentSummary : map commentDetails os
    -  where
    -    commentSummary = "-- ^" <+> tuple [bold (renderTFName $ parsedArgName o) | o <- os]
    -    commentDetails o =
    -        stack [ "--"
    -              , "-- *" <+> argComment o
    -              ]
    -
    --- | Constraints for a given type parameter.
--- E.g.: ["TensorType t"] or ["TensorType t", "OneOf '[Int64, Float] t"]
    -tensorArgConstraint :: Attr [DataType] -> [Doc]
    -tensorArgConstraint a
    -    = ("TensorType" <+> n
    -        : if null typeList
    -            then []
    -            else ["OneOf" <+> "'" <> brackets (commasep typeList) <+> n])
    -  where
    -    n = renderHaskellName $ attrName a
    -    typeList = map strictText $
    -                    Set.toList $ Set.fromList $
    -                    map dtTypeToHaskell $ attrInfo a
    -
    --- NOTE: The cases of this function should be kept in sync with
    --- TensorFlow.Types.AllTensorTypes.
    -dtTypeToHaskell :: DataType -> Text.Text
    -dtTypeToHaskell DT_BOOL = "Bool"
    -dtTypeToHaskell DT_BFLOAT16 = "Data.Word.Word16"
    -dtTypeToHaskell DT_COMPLEX128 = "(Data.Complex.Complex Double)"
    -dtTypeToHaskell DT_COMPLEX64 = "(Data.Complex.Complex Float)"
    -dtTypeToHaskell DT_DOUBLE = "Double"
    -dtTypeToHaskell DT_FLOAT = "Float"
    -dtTypeToHaskell DT_INT16 = "Data.Int.Int16"
    -dtTypeToHaskell DT_INT32 = "Data.Int.Int32"
    -dtTypeToHaskell DT_INT64 = "Data.Int.Int64"
    -dtTypeToHaskell DT_INT8 = "Data.Int.Int8"
    -dtTypeToHaskell DT_QINT32 = "Data.Int.Int32"  -- TODO(gnezdo): make unique
    -dtTypeToHaskell DT_QINT8 = "Data.Word.Word8"  -- TODO(gnezdo): make unique
    -dtTypeToHaskell DT_QINT16 = "Data.Int.Int16"  -- TODO(gnezdo): make unique
    -dtTypeToHaskell DT_QUINT16 = "Data.Word.Word16"  -- TODO(gnezdo): make unique
    -dtTypeToHaskell DT_QUINT8 = "Data.Word.Word8"  -- TODO(gnezdo): make unique
    -dtTypeToHaskell DT_STRING = "Data.ByteString.ByteString"
    -dtTypeToHaskell DT_UINT16 = "Data.Word.Word16"
    -dtTypeToHaskell DT_HALF = "Data.Word.Word16"  -- TODO(gnezdo): make unique
    -dtTypeToHaskell DT_UINT8 = "Data.Word.Word8"
    -dtTypeToHaskell DT_RESOURCE =
    -    error "ResourceHandle must be prevented from getting here."
    -dtTypeToHaskell x =
    -    Text.pack $ "Unsupported type in dtTypeToHaskell: " ++ show x
    -
--- | haddockComment renders a TensorFlow doc string as haddock text; markup
--- is not yet escaped.
    --- TODO(gnezdo): deal with the markup.
    -haddockComment :: Text.Text -> Doc
    -haddockComment = strictText
    -
    --- | Generate a multiline comment.  For example:
    ---   summary'
    ---   --
    ---   -- detail_line1
    ---   -- detail_line2
    ---   -- ...
    -multilineComment :: Text.Text -> Text.Text -> Doc
    -multilineComment summary' detail =
    -    haddockComment summary' </>
    -    splitMultilineText insertParagraphAndComment detail
    -  where insertParagraphAndComment x = "--" </> "--" <+> x
    -
    --- | Converts the given multi-line detail string into
    --- a multi-line haddock. Applies the given lead to the
    --- first line. Returns an empty document for empty detail.
    -splitMultilineText :: (Doc -> Doc) -> Text.Text -> Doc
    -splitMultilineText lead detail =
    -  case Text.lines detail of
    -    [] -> empty
    -    (l : ls) -> stack $ lead (haddockComment l)
    -                      : map (("--" <+>) . haddockComment) ls
    -
    -indentation :: Int
    -indentation = 4
    -
-
diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-opgen-0.1.0.0/src/hscolour.css
deleted file mode 100644
index c15919e..0000000
--- a/docs/haddock/tensorflow-opgen-0.1.0.0/src/hscolour.css
+++ /dev/null
@@ -1,5 +0,0 @@
-.hs-keyglyph, .hs-layout {color: red;}
-.hs-keyword {color: blue;}
-.hs-comment, .hs-comment a {color: green;}
-.hs-str, .hs-chr {color: teal;}
-.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
diff --git a/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt b/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt
index f924517..79cc37a 100644
--- a/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt
+++ b/docs/haddock/tensorflow-opgen-0.1.0.0/tensorflow-opgen.txt
@@ -14,7 +14,7 @@
 -- the generated code.
 module TensorFlow.OpGen.ParsedOp
 data ParsedOp
-ParsedOp :: Name -> Text -> Text -> [ParsedArg] -> [ParsedArg] -> [Attr AttrType] -> [Attr [DataType]] -> [Attr (NonEmpty Name)] -> Bool -> ParsedOp
+ParsedOp :: Name -> Text -> Text -> [ParsedArg] -> [ParsedArg] -> [Attr AttrType] -> [Attr TypeParam] -> [Attr (NonEmpty Name)] -> Bool -> ParsedOp
 [parsedOpName] :: ParsedOp -> Name
 [parsedOpSummary] :: ParsedOp -> Text
 [parsedOpDescription] :: ParsedOp -> Text
@@ -25,10 +25,8 @@ ParsedOp :: Name -> Text -> Text -> [ParsedArg] -> [ParsedArg] -> [Attr AttrType
 -- Associated with the type of the attribute.
 [explicitInputAttrs] :: ParsedOp -> [Attr AttrType]
 
--- | Attributes that are type parameters. Associated with the list of
--- allowed types (see: TensorFlow.Types.OneOf). If this list is empty,
--- then any type is acceptable.
-[inferredTypeAttrs] :: ParsedOp -> [Attr [DataType]]
+-- | Attributes that are type parameters.
+[inferredTypeAttrs] :: ParsedOp -> [Attr TypeParam]
 [inferredListSizeAttrs] :: ParsedOp -> [Attr (NonEmpty Name)]
 
 -- | Whether this op is stateful or takes a stateful input. Such ops should
@@ -69,26 +67,36 @@ AttrBool :: AttrBaseType
 AttrType :: AttrBaseType
 AttrShape :: AttrBaseType
 AttrTensor :: AttrBaseType
+data TypeParam
+TypeParam :: Bool -> Maybe (NonEmpty DataType) -> TypeParam
+[typeParamIsList] :: TypeParam -> Bool
+
+-- | The list of allowed types (see: TensorFlow.Types.OneOf). If
+-- Nothing, then any type is acceptable.
+[typeParamRestrictions] :: TypeParam -> Maybe (NonEmpty DataType)
 
 -- | An input or output argument (Tensor) for an op.
 data ParsedArg
-ParsedArg :: Name -> Text -> ParsedArgCase -> ArgKind -> ParsedArg
+ParsedArg :: Name -> Text -> ParsedArgCase -> ParsedArg
 [parsedArgName] :: ParsedArg -> Name
 [parsedArgDescription] :: ParsedArg -> Text
 [parsedArgCase] :: ParsedArg -> ParsedArgCase
-[parsedArgKind] :: ParsedArg -> ArgKind
 data ParsedArgCase
-SimpleArg :: ArgType -> ParsedArgCase
+SimpleArg :: ArgType -> ArgKind -> ParsedArgCase
 [argType] :: ParsedArgCase -> ArgType
-ListArg :: Name -> ArgType -> ParsedArgCase
+[argCaseKind] :: ParsedArgCase -> ArgKind
+ListArg :: Name -> ArgType -> ArgKind -> ParsedArgCase
 -- | The attribute that specifies this list's length.
 [argLength] :: ParsedArgCase -> Name
 [argType] :: ParsedArgCase -> ArgType
+[argCaseKind] :: ParsedArgCase -> ArgKind
--- | A heterogeneous list. TODO(judahjacobson): Implement this.
-MixedListArg :: Name -> ParsedArgCase
+-- | A heterogeneous list.
+MixedListArg :: Name -> ArgKind -> ParsedArgCase
 [argTypeAttr] :: ParsedArgCase -> Name
+[argCaseKind] :: ParsedArgCase -> ArgKind
+ResourceArg :: ParsedArgCase
 
 -- | The type of an argument.
 data ArgType
@@ -101,10 +109,12 @@ ArgTypeAttr :: Name -> ArgType
 data ArgKind
 ArgTensorRef :: ArgKind
 ArgTensorValue :: ArgKind
-ArgTensorEither :: Text -> ArgKind
-ArgResource :: ArgKind
+ArgTensorBuild :: ArgKind
+ArgSomeTensor :: Text -> ArgKind
+argKind :: ParsedArgCase -> Maybe ArgKind
 parseOp :: OpDef -> ParsedOp
 camelCase :: Text -> Text
+instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.ArgKind
 instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.AttrType
 instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.AttrBaseType
 instance GHC.Classes.Ord TensorFlow.OpGen.ParsedOp.TFName
@@ -130,7 +140,7 @@ instance GHC.Classes.Eq TensorFlow.OpGen.ParsedOp.TFName
 -- OneOf).
 --
 --   • {input tensors} is of the form T_1 -> ... ->
 -- T_N, where each T is of the form Tensor Ref a,
--- Tensor v a or ResourceHandle a (or a list of one of
+-- Tensor v a or ResourceHandle (or a list of one of
 -- those types), and a is either a concrete type or a
 -- (constrained) type variable.
 --
 --   • {output tensors} is of the form (T_1,...,T_N)
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html
index 3603b49..1575c69 100644
--- a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-EmbeddingOps.html
@@ -1,12 +1,12 @@
 TensorFlow.EmbeddingOps

[Condensed from the rendered Haddock diff for TensorFlow.EmbeddingOps
("Parallel lookups on the list of tensors"): embeddingLookup's documented type
changes from

    (TensorType a, OneOf '[Int64, Int32] b, Num b)
    => [Tensor v a] -> Tensor Value b -> Build (Tensor Value a)

to

    (MonadBuild m, Rendered v1, TensorType a, OneOf '[Int64, Int32] b, Num b)
    => [Tensor v1 a] -> Tensor v2 b -> m (Tensor Value a)

The argument docs are unchanged: params must be concatenable along dimension 0
and appropriately sized for the "mod" partition strategy; ids (int32 or int64)
must have fewer than 2^31 entries; the result is a dense tensor with shape
`shape(ids) + shape(params)[1:]`. The function remains a generalization of
gather, treating params as a partition of a larger embedding tensor.]
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html
index 952f801..458ec9d 100644
--- a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Gradient.html
@@ -1,4 +1,4 @@
 TensorFlow.Gradient

[Condensed from the rendered Haddock diff for TensorFlow.Gradient: gradients'
documented type changes from

    (Num (Tensor v1 a), v1 ~ Value, GradientCompatible a)
    => Tensor v1 a -> [Tensor v2 a] -> Build [Tensor Value a]

to

    (MonadBuild m, Rendered v2, GradientCompatible a)
    => Tensor v1 a -> [Tensor v2 a] -> m [Tensor Value a]

It still takes the output of the graph and the tensors for which gradients are
computed, and returns the gradient of y w.r.t. each element of xs.]
diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html
index ed54435..d34f268 100644
--- a/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html
+++ b/docs/haddock/tensorflow-ops-0.1.0.0/TensorFlow-Ops.html
@@ -1,7 +1,7 @@
 TensorFlow.Ops

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Ops

    Description

    This module contains definitions for some built-in TensorFlow operations.

    Note that certain, "stateful" ops like variable and assign return a +

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

    Safe HaskellNone
    LanguageHaskell2010

    TensorFlow.Ops

    Description

    This module contains definitions for some built-in TensorFlow operations.

    Note that certain, "stateful" ops like variable and assign return a Build action (e.g., Build (Tensor Ref a) instead of a pure value; the returned Tensors are always rendered in the current Build context. This approach helps us avoid problems with inlining or common subexpression @@ -18,27 +18,43 @@ in w * w

    since the latter could be reasonably transformed by the compile w' = assign v 3 in w * w'

    Ops should return a Build action if their original OpDef marks them as stateful, or if they take any Refs as input. (This mirrors the rules that - TensorFlow uses to avoid common subexpression elimination.)

    Synopsis

    Documentation

    add

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x + y element-wise.

    • NOTE*: Add supports broadcasting. AddN does not. More about broadcasting - here

    abs

    Arguments

    :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes the absolute value of a tensor.

    Given a tensor x, this operation returns a tensor containing the absolute + TensorFlow uses to avoid common subexpression elimination.)

    Synopsis

    Documentation

    add

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t 
    => Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build t

    z

    Returns x + y element-wise.

    • NOTE*: Add supports broadcasting. AddN does not. More about broadcasting + here

    add'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build t

    z

    abs

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Computes the absolute value of a tensor.

    Given a tensor x, this operation returns a tensor containing the absolute value of each element in x. For example, if x is an input element and y is - an output element, this operation computes \(y = |x|\).

    addN

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t) 
    => [Tensor v1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Value t

    sum

    Add all input tensors element wise.

    argMax

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => Tensor v1 t

    input

    -> Tensor v2 tidx

    dimension: int32, 0 <= dimension < rank(input). Describes which dimension - of the input Tensor to reduce across. For vectors, use dimension = 0.

    -> Tensor Value Int64

    output

    Returns the index with the largest value across dimensions of a tensor.

    assign

    Arguments

    :: TensorType t 
    => Tensor Ref t

    ref: Should be from a Variable node. May be uninitialized.

    -> Tensor v2 t

    value: The value to be assigned to the variable.

    -> Build (Tensor Ref t)

    output_ref: = Same as "ref". Returned as a convenience for operations that want + an output element, this operation computes \(y = |x|\).

    abs'

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor Build t

    y

    addN

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
    => [Tensor v'1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Build t

    sum

    Add all input tensors element wise.

    addN'

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
    => OpParams 
    -> [Tensor v'1 t]

    inputs: Must all be the same size and shape.

    -> Tensor Build t

    sum

    argMax

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => Tensor v'1 t

    input

    -> Tensor v'2 tidx

    dimension: int32, 0 <= dimension < rank(input). Describes which dimension + of the input Tensor to reduce across. For vectors, use dimension = 0.

    -> Tensor Build Int64

    output

    Returns the index with the largest value across dimensions of a tensor.

    argMax'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => OpParams 
    -> Tensor v'1 t

    input

    -> Tensor v'2 tidx

    dimension: int32, 0 <= dimension < rank(input). Describes which dimension + of the input Tensor to reduce across. For vectors, use dimension = 0.

    -> Tensor Build Int64

    output

    assign

    Arguments

    :: forall (v'2 :: * -> *) (m' :: * -> *). (MonadBuild m', TensorType t) 
    => Tensor Ref t

    ref: Should be from a Variable node. May be uninitialized.

    -> Tensor v'2 t

    value: The value to be assigned to the variable.

    -> m' (Tensor Ref t)

    output_ref: = Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been reset.

    Update ref by assigning value to it.

    This operation outputs "ref" after the assignment is done. - This makes it easier to chain operations that need to use the reset value.

    broadcastGradientArgs

    Arguments

    :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) t) 
    => Tensor v1 t

    s0

    -> Tensor v2 t

    s1

    -> (Tensor Value t, Tensor Value t)

    (r0, r1)

    • r0
    • r1

    Return the reduction indices for computing gradients of s0 op s1 with broadcast.

    This is typically used by gradient computations for a broadcasting operation.

    cast

    Arguments

    :: (TensorType srcT, TensorType dstT) 
    => Tensor v1 srcT

    x

    -> Tensor Value dstT

    y

    Cast x of type SrcT to y of DstT.

    concat

    Arguments

    :: TensorType t 
    => Tensor v1 Int32

    concat_dim: 0-D. The dimension along which to concatenate. Must be in the - range [0, rank(values)).

    -> [Tensor v2 t]

    values: The N Tensors to concatenate. Their ranks and types must match, - and their sizes must match in all dimensions except concat_dim.

    -> Tensor Value t

    output: A Tensor with the concatenation of values stacked along the + This makes it easier to chain operations that need to use the reset value.

    assign'

    Arguments

    :: forall (v'2 :: * -> *) (m' :: * -> *). (MonadBuild m', TensorType t) 
    => OpParams 
    -> Tensor Ref t

    ref: Should be from a Variable node. May be uninitialized.

    -> Tensor v'2 t

    value: The value to be assigned to the variable.

    -> m' (Tensor Ref t)

    output_ref: = Same as "ref". Returned as a convenience for operations that want + to use the new value after the variable has been reset.

    broadcastGradientArgs

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ([] *))) t 
    => Tensor v'1 t

    s0

    -> Tensor v'2 t

    s1

    -> (Tensor Build t, Tensor Build t)

    (r0, r1)

    • r0
    • r1

    Return the reduction indices for computing gradients of s0 op s1 with broadcast.

    This is typically used by gradient computations for a broadcasting operation.

    broadcastGradientArgs'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ([] *))) t 
    => OpParams 
    -> Tensor v'1 t

    s0

    -> Tensor v'2 t

    s1

    -> (Tensor Build t, Tensor Build t)

    (r0, r1)

    • r0
    • r1

    cast

    Arguments

    :: forall (v'1 :: * -> *). (TensorType srcT, TensorType dstT) 
    => Tensor v'1 srcT

    x

    -> Tensor Build dstT

    y

    Cast x of type SrcT to y of DstT.

    cast'

    Arguments

    :: forall (v'1 :: * -> *). (TensorType srcT, TensorType dstT) 
    => OpParams 
    -> Tensor v'1 srcT

    x

    -> Tensor Build dstT

    y

    concat

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
    => Tensor v'1 Int32

    concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

    -> [Tensor v'2 t]

    values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

    -> Tensor Build t

    output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor's shape matches that of values except - in concat_dim where it has the sum of the sizes.

    Concatenates tensors along one dimension.

    constant :: forall a. TensorType a => Shape -> [a] -> Tensor Value a Source

    Create a constant tensor.

    The values should be in row major order, e.g.,

    element 0: index (0, ..., 0) + in concat_dim where it has the sum of the sizes.

    Concatenates tensors along one dimension.

    concat'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
    => OpParams 
    -> Tensor v'1 Int32

    concat_dim: 0-D. The dimension along which to concatenate. Must be in the + range [0, rank(values)).

    -> [Tensor v'2 t]

    values: The N Tensors to concatenate. Their ranks and types must match, + and their sizes must match in all dimensions except concat_dim.

    -> Tensor Build t

    output: A Tensor with the concatenation of values stacked along the + concat_dim dimension. This tensor's shape matches that of values except + in concat_dim where it has the sum of the sizes.

    constant :: TensorType a => Shape -> [a] -> Tensor Build a

    Create a constant tensor.

    The values should be in row major order, e.g.,

    element 0: index (0, ..., 0) element 1: index (0, ..., 1) - ...

    equal

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value Bool

    z

    Returns the truth value of (x == y) element-wise.

    • NOTE*: Equal supports broadcasting. More about broadcasting - here

    initializedVariable :: forall a. TensorType a => Tensor Value a -> Build (Tensor Ref a) Source

    Creates a variable initialized to the given value. - Initialization happens next time session runs.

    zeroInitializedVariable :: (TensorType a, Num a) => Shape -> Build (Tensor Ref a) Source

    Creates a zero-initialized variable with the given shape.

    fill

    Arguments

    :: TensorType t 
    => Tensor v1 Int32

    dims: 1-D. Represents the shape of the output tensor.

    -> Tensor v2 t

    value: 0-D (scalar). Value to fill the returned tensor.

    compatibility(numpy) + ...

    constant' :: forall a. TensorType a => OpParams -> Shape -> [a] -> Tensor Build a

    equal

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t 
    => Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build Bool

    z

    Returns the truth value of (x == y) element-wise.

    • NOTE*: Equal supports broadcasting. More about broadcasting + here

    equal'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build Bool

    z

    initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Tensor Ref a)

    Creates a variable initialized to the given value. + Initialization happens next time session runs.

    zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Tensor Ref a)

    Creates a zero-initialized variable with the given shape.

    fill

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
    => Tensor v'1 Int32

    dims: 1-D. Represents the shape of the output tensor.

    -> Tensor v'2 t

    value: 0-D (scalar). Value to fill the returned tensor.

    compatibility(numpy) Equivalent to np.full - end_compatibility

    -> Tensor Value t

    output

    Creates a tensor filled with a scalar value.

    This operation creates a tensor of shape dims and fills it with value.

    For example:

    ```prettyprint + end_compatibility

    -> Tensor Build t

    output

    Creates a tensor filled with a scalar value.

    This operation creates a tensor of shape dims and fills it with value.

    For example:

    ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) ==> [[9, 9, 9] [9, 9, 9]] - ```

    oneHot

    Arguments

    :: (TensorType t, TensorType tI, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) 
    => Tensor v1 tI

    indices: A tensor of indices.

    -> Tensor v2 Int32

    depth: A scalar defining the depth of the one hot dimension.

    -> Tensor v3 t

    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

    -> Tensor v4 t

    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

    -> Tensor Value t

    output: The one-hot tensor.

    Returns a one-hot tensor.

    The locations represented by indices in indices take value on_value, + ```

    fill'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). TensorType t 
    => OpParams 
    -> Tensor v'1 Int32

    dims: 1-D. Represents the shape of the output tensor.

    -> Tensor v'2 t

    value: 0-D (scalar). Value to fill the returned tensor.

    compatibility(numpy) + Equivalent to np.full + end_compatibility

    -> Tensor Build t

    output

    identity

    Arguments

    :: forall (v'1 :: * -> *). TensorType t 
    => Tensor v'1 t

    input

    -> Tensor Build t

    output

    Return a tensor with the same shape and contents as the input tensor or value.

    identity'

    Arguments

    :: forall (v'1 :: * -> *). TensorType t 
    => OpParams 
    -> Tensor v'1 t

    input

    -> Tensor Build t

    output

    matMul

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t 
    => Tensor v'1 t

    a

    -> Tensor v'2 t

    b

    -> Tensor Build t

    product

    Multiply the matrix "a" by the matrix "b".

    The inputs must be two-dimensional matrices and the inner dimension of + "a" (after being transposed if transpose_a is true) must match the + outer dimension of "b" (after being transposed if transposed_b is + true).

    • Note*: The default kernel implementation for MatMul on GPUs uses + cublas.

    matMul'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t 
    => OpParams 
    -> Tensor v'1 t

    a

    -> Tensor v'2 t

    b

    -> Tensor Build t

    product

    mean

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => Tensor v'1 t

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build t

    output: The reduced tensor.

    Computes the mean of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless + keep_dims is true, the rank of the tensor is reduced by 1 for each entry in + reduction_indices. If keep_dims is true, the reduced dimensions are + retained with length 1.

    mean'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => OpParams 
    -> Tensor v'1 t

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build t

    output: The reduced tensor.

    mul

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
    => Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build t

    z

    Returns x * y element-wise.

    • NOTE*: Mul supports broadcasting. More about broadcasting + here

    mul'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build t

    z

    neg

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Computes numerical negative value element-wise.

    I.e., \(y = -x\).

    neg'

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor Build t

    y

    oneHot

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) 
    => Tensor v'1 tI

    indices: A tensor of indices.

    -> Tensor v'2 Int32

    depth: A scalar defining the depth of the one hot dimension.

    -> Tensor v'3 t

    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

    -> Tensor v'4 t

    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

    -> Tensor Build t

    output: The one-hot tensor.

    Returns a one-hot tensor.

    The locations represented by indices in indices take value on_value, while all other locations take value off_value.

    If the input indices is rank N, the output will have rank `N+1`, The new axis is created at dimension axis (default: the new axis is appended at the end).

    If indices is a scalar the output shape will be a vector of length depth.

    If indices is a vector of length features, the output shape will be: @@ -92,15 +108,7 @@ in w * w'

    Ops should return a

    matMul

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t) 
    => Tensor v1 t

    a

    -> Tensor v2 t

    b

    -> Tensor Value t

    product

    Multiply the matrix "a" by the matrix "b".

    The inputs must be two-dimensional matrices and the inner dimension of - "a" (after being transposed if transpose_a is true) must match the - outer dimension of "b" (after being transposed if transposed_b is - true).

    • Note*: The default kernel implementation for MatMul on GPUs uses - cublas.

    matTranspose :: forall a v. TensorType a => Tensor v a -> Tensor Value a Source

    mean

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the mean of elements across dimensions of a tensor.

    Reduces input along the dimensions given in reduction_indices. Unless - keep_dims is true, the rank of the tensor is reduced by 1 for each entry in - reduction_indices. If keep_dims is true, the reduced dimensions are - retained with length 1.

    mul

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x * y element-wise.

    • NOTE*: Mul supports broadcasting. More about broadcasting - here

    neg

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Computes numerical negative value element-wise.

    I.e., \(y = -x\).

    pack

    Arguments

    :: TensorType t 
    => [Tensor v1 t]

    values: Must be of same shape and type.

    -> Tensor Value t

    output: The packed tensor.

    Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

    Packs the N tensors in values into a tensor with rank one higher than each + ]```

    oneHot'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) 
    => OpParams 
    -> Tensor v'1 tI

    indices: A tensor of indices.

    -> Tensor v'2 Int32

    depth: A scalar defining the depth of the one hot dimension.

    -> Tensor v'3 t

    on_value: A scalar defining the value to fill in output when `indices[j] = i`.

    -> Tensor v'4 t

    off_value: A scalar defining the value to fill in output when `indices[j] != i`.

    -> Tensor Build t

    output: The one-hot tensor.

    pack

    Arguments

    :: forall (v'1 :: * -> *). TensorType t 
    => [Tensor v'1 t]

    values: Must be of same shape and type.

    -> Tensor Build t

    output: The packed tensor.

    Packs a list of N rank-R tensors into one rank-`(R+1)` tensor.

    Packs the N tensors in values into a tensor with rank one higher than each tensor in values, by packing them along the axis dimension. Given a list of tensors of shape `(A, B, C)`;

    if `axis == 0` then the output tensor will have the shape `(N, A, B, C)`. if `axis == 1` then the output tensor will have the shape `(A, N, B, C)`. @@ -110,14 +118,15 @@ in w * w'

    Ops should return a unpack.

    range

    Arguments

    :: (TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx) 
    => Tensor v1 tidx

    start: 0-D (scalar). First entry in the sequence.

    -> Tensor v2 tidx

    limit: 0-D (scalar). Upper limit of sequence, exclusive.

    -> Tensor v3 tidx

    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

    -> Tensor Value tidx

    output: 1-D.

    Creates a sequence of numbers.

    This operation creates a sequence of numbers that begins at start and + ```

    This is the opposite of unpack.

    pack'

    Arguments

    :: forall (v'1 :: * -> *). TensorType t 
    => OpParams 
    -> [Tensor v'1 t]

    values: Must be of same shape and type.

    -> Tensor Build t

    output: The packed tensor.

    placeholder' :: forall m a. (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Tensor Value a)

    range

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx 
    => Tensor v'1 tidx

    start: 0-D (scalar). First entry in the sequence.

    -> Tensor v'2 tidx

    limit: 0-D (scalar). Upper limit of sequence, exclusive.

    -> Tensor v'3 tidx

    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

    -> Tensor Build tidx

    output: 1-D.

    Creates a sequence of numbers.

    This operation creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.

    For example:

    ``` # start is 3 # limit is 18 # delta is 3 tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] - ```

    reducedShape :: (OneOf `[Int32, Int64]` t1, OneOf `[Int32, Int64]` t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32 Source

    Helper function for reduction ops (translation of math_ops.reduced_shape).

    relu

    Arguments

    :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t) 
    => Tensor v1 t

    features

    -> Tensor Value t

    activations

    Computes rectified linear: `max(features, 0)`.

    reluGrad

    Arguments

    :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t) 
    => Tensor v1 t

    gradients: The backpropagated gradients to the corresponding Relu operation.

    -> Tensor v2 t

    features: The features passed as input to the corresponding Relu operation, OR - the outputs of that operation (both work equivalently).

    -> Tensor Value t

    backprops: `gradients * (features > 0)`.

    Computes rectified linear gradients for a Relu operation.

    reshape

    Arguments

    :: (TensorType t, TensorType tshape, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) 
    => Tensor v1 t

    tensor

    -> Tensor v2 tshape

    shape: Defines the shape of the output tensor.

    -> Tensor Value t

    output

    Reshapes a tensor.

    Given tensor, this operation returns a tensor that has the same values + ```

    range'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *). OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx 
    => OpParams 
    -> Tensor v'1 tidx

    start: 0-D (scalar). First entry in the sequence.

    -> Tensor v'2 tidx

    limit: 0-D (scalar). Upper limit of sequence, exclusive.

    -> Tensor v'3 tidx

    delta: 0-D (scalar). Optional. Default is 1. Number that increments start.

    -> Tensor Build tidx

    output: 1-D.

    reducedShape :: (OneOf `[Int32, Int64]` t1, OneOf `[Int32, Int64]` t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32

    Helper function for reduction ops (translation of math_ops.reduced_shape).

    relu

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
    => Tensor v'1 t

    features

    -> Tensor Build t

    activations

    Computes rectified linear: `max(features, 0)`.

    relu'

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
    => OpParams 
    -> Tensor v'1 t

    features

    -> Tensor Build t

    activations

    reluGrad

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
    => Tensor v'1 t

    gradients: The backpropagated gradients to the corresponding Relu operation.

    -> Tensor v'2 t

features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently).

    -> Tensor Build t

    backprops: `gradients * (features > 0)`.

    Computes rectified linear gradients for a Relu operation.

    reluGrad'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t 
    => OpParams 
    -> Tensor v'1 t

    gradients: The backpropagated gradients to the corresponding Relu operation.

    -> Tensor v'2 t

features: The features passed as input to the corresponding Relu operation, OR the outputs of that operation (both work equivalently).

    -> Tensor Build t

    backprops: `gradients * (features > 0)`.

    reshape

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) 
    => Tensor v'1 t

    tensor

    -> Tensor v'2 tshape

    shape: Defines the shape of the output tensor.

    -> Tensor Build t

    output

    Reshapes a tensor.

    Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.

    If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of `[-1]` flattens into 1-D. At most one component of shape can be -1.

If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements implied by shape must be the same as the number of elements in tensor.
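As a sketch of the flattening use case described above (assuming the TF.runSession/TF.run API from TensorFlow.Core and the Shape constructor from TensorFlow.Types; flattenExample is an illustrative name):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Flattens a 2x3 constant into 1-D via the special shape [-1].
flattenExample :: IO (V.Vector Float)
flattenExample = TF.runSession $ do
    let t = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
    TF.run (TF.reshape t (TF.vector [-1 :: Int32]))
```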

    restore Source

    Arguments

    :: TensorType a 
    => ByteString

    File path.

    -> Tensor Ref a

    Tensor to restore.

    -> Build ControlNode 

    Restore a tensor's value from a checkpoint file.

    restoreFromName Source

    Arguments

    :: TensorType a 
    => ByteString

    File path.

    -> ByteString

    Tensor name override.

    -> Tensor Ref a

    Tensor to restore.

    -> Build ControlNode 

    Restore a tensor's value from a checkpoint file.

This version allows restoring from a checkpoint file that uses a different tensor name than the variable.

    save Source

    Arguments

    :: TensorType a 
    => ByteString

    File path.

    -> [Tensor v a]

    Tensors to save.

    -> Build ControlNode 

    scalar :: forall a. TensorType a => a -> Tensor Value a Source

    Create a constant scalar.

    sign

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) 
    => Tensor v1 t

    x

    -> Tensor Value t

    y

    Returns an element-wise indication of the sign of a number.

`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

    For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

    size

    Arguments

    :: (TensorType t, TensorType out_type, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) 
    => Tensor v1 t

    input

    -> Tensor Value out_type

    output

    Returns the size of a tensor.

This operation returns an integer representing the number of elements in input.

    reshape'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) 
    => OpParams 
    -> Tensor v'1 t

    tensor

    -> Tensor v'2 tshape

    shape: Defines the shape of the output tensor.

    -> Tensor Build t

    output

    restore

    Arguments

    :: (MonadBuild m, TensorType a) 
    => ByteString

    File path.

    -> Tensor Ref a

    Tensor to restore.

    -> m ControlNode 

    Restore a tensor's value from a checkpoint file.

    restoreFromName

    Arguments

    :: (MonadBuild m, TensorType a) 
    => ByteString

    File path.

    -> ByteString

    Tensor name override.

    -> Tensor Ref a

    Tensor to restore.

    -> m ControlNode 

    Restore a tensor's value from a checkpoint file.

This version allows restoring from a checkpoint file that uses a different tensor name than the variable.

    save

    Arguments

    :: (Rendered v, MonadBuild m, TensorType a) 
    => ByteString

    File path.

    -> [Tensor v a]

    Tensors to save.

    -> m ControlNode 
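A round-trip sketch combining save and restore (hedged: it assumes Session has a MonadBuild instance, that Tensor Ref satisfies the Rendered constraint, and that ControlNodes can be executed with TF.run_; the checkpoint path is illustrative only):

```haskell
{-# LANGUAGE OverloadedStrings #-}
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Saves a variable to a checkpoint, then restores it in the same session.
saveRestoreExample :: IO ()
saveRestoreExample = TF.runSession $ do
    v <- TF.initializedVariable (TF.vector [1, 2, 3 :: Float])
    TF.save "/tmp/example.ckpt" [v] >>= TF.run_     -- hypothetical path
    TF.restore "/tmp/example.ckpt" v >>= TF.run_
```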

    scalar :: TensorType a => a -> Tensor Build a

    Create a constant scalar.

    sign

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
    => Tensor v'1 t

    x

    -> Tensor Build t

    y

    Returns an element-wise indication of the sign of a number.

`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

    For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

    sign'

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor Build t

    y

    size

    Arguments

    :: forall (v'1 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) 
    => Tensor v'1 t

    input

    -> Tensor Build out_type

    output

    Returns the size of a tensor.

    This operation returns an integer representing the number of elements in input.

    For example:

```prettyprint
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```

    softmax

    Arguments

    :: (TensorType t, OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t) 
    => Tensor v1 t

    logits: 2-D with shape `[batch_size, num_classes]`.

    -> Tensor Value t

    softmax: Same shape as logits.

    Computes softmax activations.

    For each batch i and class j we have

    softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))

    softmaxCrossEntropyWithLogits

    Arguments

    :: (TensorType t, OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t) 
    => Tensor v1 t

    features: batch_size x num_classes matrix

    -> Tensor v2 t

labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid probability distribution.

    size'

    Arguments

    :: forall (v'1 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) 
    => OpParams 
    -> Tensor v'1 t

    input

    -> Tensor Build out_type

    output

    softmax

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
    => Tensor v'1 t

    logits: 2-D with shape `[batch_size, num_classes]`.

    -> Tensor Build t

    softmax: Same shape as logits.

    Computes softmax activations.

    For each batch i and class j we have

    softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
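A small sketch of the formula in action (assumes the TF.runSession/TF.run API and Data.Vector fetching; softmaxExample is an illustrative name):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Row-wise softmax over a 1x3 logits matrix; the result row sums to 1.
softmaxExample :: IO (V.Vector Float)
softmaxExample = TF.runSession $ do
    let logits = TF.constant (TF.Shape [1, 3]) [1, 2, 3 :: Float]
    TF.run (TF.softmax logits)
```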

    softmax'

    Arguments

    :: forall (v'1 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
    => OpParams 
    -> Tensor v'1 t

    logits: 2-D with shape `[batch_size, num_classes]`.

    -> Tensor Build t

    softmax: Same shape as logits.

    softmaxCrossEntropyWithLogits

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
    => Tensor v'1 t

    features: batch_size x num_classes matrix

    -> Tensor v'2 t

labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid probability distribution.

    -> (Tensor Value t, Tensor Value t)

    (loss, backprop)

    • loss: Per example loss (batch_size vector).
    • backprop: backpropagated gradients (batch_size x num_classes matrix).

    Computes softmax cross entropy cost and gradients to backpropagate.

    Inputs are the logits, not probabilities.

    sparseToDense

    Arguments

    :: (TensorType t, TensorType tindices, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) 
    => Tensor v1 tindices

sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed.

    -> Tensor v2 tindices

    output_shape: 1-D. Shape of the dense output tensor.

    -> Tensor v3 t

sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices.

    -> Tensor v4 t

default_value: Scalar value to set for indices not specified in sparse_indices.

    -> Tensor Value t

    dense: Dense output tensor of shape output_shape.

    Converts a sparse representation into a dense tensor.

    Builds an array dense with shape output_shape such that

```prettyprint
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```

    -> (Tensor Build t, Tensor Build t)

    (loss, backprop)

    • loss: Per example loss (batch_size vector).
    • backprop: backpropagated gradients (batch_size x num_classes matrix).

    Computes softmax cross entropy cost and gradients to backpropagate.

    Inputs are the logits, not probabilities.
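A loss-computation sketch (hedged: assumes TF.runSession/TF.run and that a pair component can be fetched; lossExample and the sample values are illustrative):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Per-example loss for two examples over three classes.
-- Labels are one-hot rows, i.e. valid probability distributions.
lossExample :: IO (V.Vector Float)
lossExample = TF.runSession $ do
    let logits = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 1, 1, 1 :: Float]
        labels = TF.constant (TF.Shape [2, 3]) [0, 0, 1, 0, 1, 0 :: Float]
        (loss, _backprop) = TF.softmaxCrossEntropyWithLogits logits labels
    TF.run loss
```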

    softmaxCrossEntropyWithLogits'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t 
    => OpParams 
    -> Tensor v'1 t

    features: batch_size x num_classes matrix

    -> Tensor v'2 t

labels: batch_size x num_classes matrix The caller must ensure that each batch of labels represents a valid probability distribution.

    -> (Tensor Build t, Tensor Build t)

    (loss, backprop)

    • loss: Per example loss (batch_size vector).
    • backprop: backpropagated gradients (batch_size x num_classes matrix).

    sparseToDense

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) 
    => Tensor v'1 tindices

sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed.

    -> Tensor v'2 tindices

    output_shape: 1-D. Shape of the dense output tensor.

    -> Tensor v'3 t

sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices.

    -> Tensor v'4 t

default_value: Scalar value to set for indices not specified in sparse_indices.

    -> Tensor Build t

    dense: Dense output tensor of shape output_shape.

    Converts a sparse representation into a dense tensor.

    Builds an array dense with shape output_shape such that

```prettyprint
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
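A scatter-style sketch of the semantics above (assumes TF.runSession/TF.run and Data.Vector fetching; scatterExample is an illustrative name):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Scatters [9, 7] into positions 1 and 3 of a length-5 vector of zeros,
-- yielding [0, 9, 0, 7, 0].
scatterExample :: IO (V.Vector Int32)
scatterExample = TF.runSession $ TF.run $
    TF.sparseToDense
        (TF.vector [1, 3 :: Int32])  -- sparse_indices
        (TF.vector [5 :: Int32])     -- output_shape
        (TF.vector [9, 7 :: Int32])  -- sparse_values
        (TF.scalar 0)                -- default_value
```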

    sub

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) 
    => Tensor v1 t

    x

    -> Tensor v2 t

    y

    -> Tensor Value t

    z

    Returns x - y element-wise.

*NOTE*: Sub supports broadcasting. More about broadcasting here.

    sum

    Arguments

    :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => Tensor v1 t

    input: The tensor to reduce.

    -> Tensor v2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Value t

    output: The reduced tensor.

    Computes the sum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.

    sparseToDense'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *) (v'3 :: * -> *) (v'4 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) 
    => OpParams 
    -> Tensor v'1 tindices

sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed.

    -> Tensor v'2 tindices

    output_shape: 1-D. Shape of the dense output tensor.

    -> Tensor v'3 t

sparse_values: 1-D. Values corresponding to each row of sparse_indices, or a scalar value to be used for all sparse indices.

    -> Tensor v'4 t

default_value: Scalar value to set for indices not specified in sparse_indices.

    -> Tensor Build t

    dense: Dense output tensor of shape output_shape.

    sub

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
    => Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build t

    z

    Returns x - y element-wise.

*NOTE*: Sub supports broadcasting. More about broadcasting here.
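A one-line broadcasting sketch (assumes TF.runSession/TF.run and Data.Vector fetching; subExample is an illustrative name):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- The scalar 1 broadcasts against the vector, giving [4, 6, 8].
subExample :: IO (V.Vector Float)
subExample = TF.runSession $
    TF.run (TF.sub (TF.vector [5, 7, 9 :: Float]) (TF.scalar 1))
```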

    sub'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor v'2 t

    y

    -> Tensor Build t

    z

    sum

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => Tensor v'1 t

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build t

    output: The reduced tensor.

    Computes the sum of elements across dimensions of a tensor.

Reduces input along the dimensions given in reduction_indices. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If keep_dims is true, the reduced dimensions are retained with length 1.
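A reduction sketch over a concrete matrix (assumes TF.runSession/TF.run, the Shape constructor, and Data.Vector fetching; rowSumsExample is an illustrative name):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Sums a 2x3 matrix along dimension 1, giving the row sums [6, 15].
rowSumsExample :: IO (V.Vector Float)
rowSumsExample = TF.runSession $ do
    let m = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
    TF.run (TF.sum m (TF.vector [1 :: Int32]))
```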

    transpose

    Arguments

    :: (TensorType t, TensorType tperm, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) 
    => Tensor v1 t

    x

    -> Tensor v2 tperm

    perm

    -> Tensor Value t

    y

    Shuffle dimensions of x according to a permutation.

The output y has the same rank as x. The shapes of x and y satisfy: `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`

    truncatedNormal Source

    Arguments

    :: TensorType a 
    => Tensor v Int64

    Shape.

    -> Build (Tensor Value a) 

    variable

    Arguments

    :: TensorType dtype 
    => Shape

    shape: The shape of the variable tensor.

    -> Build (Tensor Ref dtype)

    ref: A reference to the variable tensor.

    Holds state in the form of a tensor that persists across steps.

Outputs a ref to the tensor state so it may be read or modified. TODO(zhifengc/mrry): Add a pointer to a more detailed document about sharing states in tensorflow.
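A state-holding sketch (hedged: it assumes Session is a MonadBuild instance, that a Tensor Ref can be executed with TF.run_ and fetched with TF.run; variableExample is an illustrative name):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- State that persists across steps: initialize, overwrite, then read back.
variableExample :: IO (V.Vector Float)
variableExample = TF.runSession $ do
    v <- TF.initializedVariable (TF.scalar (0 :: Float))
    TF.assign v (TF.scalar 1) >>= TF.run_
    TF.run v
```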

    vector :: TensorType a => [a] -> Tensor Value a Source

    Create a constant vector.

    zeros :: forall a. (Num a, TensorType a) => Shape -> Tensor Value a Source

    zerosLike

    Arguments

    :: TensorType t 
    => Tensor v1 t

    x: a tensor of type T.

    -> Tensor Value t

    y: a tensor of the same shape and type as x but filled with zeros.

    Returns a tensor of zeros with the same shape and type as x.

    scalarize :: TensorType a => Tensor v a -> Tensor Value a Source

Reshape an N-D tensor down to a scalar.

    See reshape.

\ No newline at end of file

    sum'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) 
    => OpParams 
    -> Tensor v'1 t

    input: The tensor to reduce.

    -> Tensor v'2 tidx

    reduction_indices: The dimensions to reduce.

    -> Tensor Build t

    output: The reduced tensor.

    transpose

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) 
    => Tensor v'1 t

    x

    -> Tensor v'2 tperm

    perm

    -> Tensor Build t

    y

    Shuffle dimensions of x according to a permutation.

The output y has the same rank as x. The shapes of x and y satisfy: `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
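A permutation sketch (assumes TF.runSession/TF.run, the Shape constructor, and Data.Vector fetching; transposeExample is an illustrative name):

```haskell
import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- perm = [1, 0] swaps the two axes: a 2x3 matrix becomes 3x2.
transposeExample :: IO (V.Vector Float)
transposeExample = TF.runSession $ do
    let m = TF.constant (TF.Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]
    TF.run (TF.transpose m (TF.vector [1, 0 :: Int32]))
```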

    transpose'

    Arguments

    :: forall (v'1 :: * -> *) (v'2 :: * -> *). (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) 
    => OpParams 
    -> Tensor v'1 t

    x

    -> Tensor v'2 tperm

    perm

    -> Tensor Build t

    y

    truncatedNormal

    Arguments

    :: (MonadBuild m, OneOf `[Word16, Double, Float]` a) 
    => Tensor v Int64

    Shape.

    -> m (Tensor Value a) 

    Random tensor from the unit normal distribution with bounded values.

    This is a type-restricted version of truncatedNormal.
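A sampling sketch (hedged: assumes Session is a MonadBuild instance and that the Value tensor can be fetched with TF.run; truncatedNormalExample is an illustrative name):

```haskell
import Data.Int (Int64)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Draws a 2x2 sample from the truncated unit normal distribution.
truncatedNormalExample :: IO (V.Vector Float)
truncatedNormalExample = TF.runSession $ do
    t <- TF.truncatedNormal (TF.vector [2, 2 :: Int64])
    TF.run t
```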

    truncatedNormal'

    Arguments

    :: (MonadBuild m, OneOf `[Word16, Double, Float]` a) 
    => OpParams 
    -> Tensor v Int64

    Shape.

    -> m (Tensor Value a) 

    variable

    Arguments

    :: forall (m' :: * -> *). (MonadBuild m', TensorType dtype) 
    => Shape

    shape

    -> m' (Tensor Ref dtype)

    ref

    Use VariableV2 instead.

    variable'

    Arguments

    :: forall (m' :: * -> *). (MonadBuild m', TensorType dtype) 
    => OpParams 
    -> Shape

    shape

    -> m' (Tensor Ref dtype)

    ref

    vector :: TensorType a => [a] -> Tensor Build a

    Create a constant vector.

    vector' :: TensorType a => OpParams -> [a] -> Tensor Build a

    zeros :: forall a. (Num a, TensorType a) => Shape -> Tensor Build a

    zerosLike

    Arguments

    :: forall (v'1 :: * -> *). TensorType t 
    => Tensor v'1 t

    x: a tensor of type T.

    -> Tensor Build t

    y: a tensor of the same shape and type as x but filled with zeros.

    Returns a tensor of zeros with the same shape and type as x.

    zerosLike'

    Arguments

    :: forall (v'1 :: * -> *). TensorType t 
    => OpParams 
    -> Tensor v'1 t

    x: a tensor of type T.

    -> Tensor Build t

    y: a tensor of the same shape and type as x but filled with zeros.

    scalarize :: TensorType a => Tensor v a -> Tensor Build a

Reshape an N-D tensor down to a scalar.

    See reshape.
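A final sketch tying scalarize to reshape (assumes TF.runSession/TF.run, the Shape constructor, and Data.Vector fetching; scalarizeExample is an illustrative name):

```haskell
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

-- Collapses a 1x1 tensor down to its single element.
scalarizeExample :: IO (V.Vector Float)
scalarizeExample = TF.runSession $ do
    let t = TF.constant (TF.Shape [1, 1]) [42 :: Float]
    TF.run (TF.scalarize t)  -- fetched as a one-element vector
```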

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html index 73d7efa..d766ab4 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings. (Index)

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

    \ No newline at end of file +

    tensorflow-ops-0.1.0.0: Friendly layer around TensorFlow bindings.

    Index

    absTensorFlow.Ops
    abs'TensorFlow.Ops
    addTensorFlow.Ops
    add'TensorFlow.Ops
    addNTensorFlow.Ops
    addN'TensorFlow.Ops
    argMaxTensorFlow.Ops
    argMax'TensorFlow.Ops
    assignTensorFlow.Ops
    assign'TensorFlow.Ops
    broadcastGradientArgsTensorFlow.Ops
    broadcastGradientArgs'TensorFlow.Ops
    castTensorFlow.Ops
    cast'TensorFlow.Ops
    concatTensorFlow.Ops
    concat'TensorFlow.Ops
    constantTensorFlow.Ops
    constant'TensorFlow.Ops
    embeddingLookupTensorFlow.EmbeddingOps
    equalTensorFlow.Ops
    equal'TensorFlow.Ops
    expandDimsTensorFlow.Ops
    expandDims'TensorFlow.Ops
    fillTensorFlow.Ops
    fill'TensorFlow.Ops
    gradientsTensorFlow.Gradient
    identityTensorFlow.Ops
    identity'TensorFlow.Ops
    initializedVariableTensorFlow.Ops
    initializedVariable'TensorFlow.Ops
    matMulTensorFlow.Ops
    matMul'TensorFlow.Ops
    matTransposeTensorFlow.Ops
    matTranspose'TensorFlow.Ops
    meanTensorFlow.Ops
    mean'TensorFlow.Ops
    mulTensorFlow.Ops
    mul'TensorFlow.Ops
    negTensorFlow.Ops
    neg'TensorFlow.Ops
    oneHotTensorFlow.Ops
    oneHot'TensorFlow.Ops
    packTensorFlow.Ops
    pack'TensorFlow.Ops
    placeholderTensorFlow.Ops
    placeholder'TensorFlow.Ops
    rangeTensorFlow.Ops
    range'TensorFlow.Ops
    reducedShapeTensorFlow.Ops
    reluTensorFlow.Ops
    relu'TensorFlow.Ops
    reluGradTensorFlow.Ops
    reluGrad'TensorFlow.Ops
    reshapeTensorFlow.Ops
    reshape'TensorFlow.Ops
    restoreTensorFlow.Ops
    restoreFromNameTensorFlow.Ops
    saveTensorFlow.Ops
    scalarTensorFlow.Ops
    scalar'TensorFlow.Ops
    scalarizeTensorFlow.Ops
    shapeTensorFlow.Ops
    shape'TensorFlow.Ops
    signTensorFlow.Ops
    sign'TensorFlow.Ops
    sizeTensorFlow.Ops
    size'TensorFlow.Ops
    softmaxTensorFlow.Ops
    softmax'TensorFlow.Ops
    softmaxCrossEntropyWithLogitsTensorFlow.Ops
    softmaxCrossEntropyWithLogits'TensorFlow.Ops
    sparseToDenseTensorFlow.Ops
    sparseToDense'TensorFlow.Ops
    subTensorFlow.Ops
    sub'TensorFlow.Ops
    sumTensorFlow.Ops
    sum'TensorFlow.Ops
    transposeTensorFlow.Ops
    transpose'TensorFlow.Ops
    truncatedNormalTensorFlow.Ops
    truncatedNormal'TensorFlow.Ops
    variableTensorFlow.Ops
    variable'TensorFlow.Ops
    vectorTensorFlow.Ops
    vector'TensorFlow.Ops
    zeroInitializedVariableTensorFlow.Ops
    zeroInitializedVariable'TensorFlow.Ops
    zerosTensorFlow.Ops
    zerosLikeTensorFlow.Ops
    zerosLike'TensorFlow.Ops
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html index 2781862..181e279 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html +++ b/docs/haddock/tensorflow-ops-0.1.0.0/mini_TensorFlow-Ops.html @@ -1,4 +1,4 @@ TensorFlow.Ops

    TensorFlow.Ops

    \ No newline at end of file +

    TensorFlow.Ops

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-EmbeddingOps.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-EmbeddingOps.html deleted file mode 100644 index 95fc4c3..0000000 --- a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-EmbeddingOps.html +++ /dev/null @@ -1,99 +0,0 @@ - - - - - -src/TensorFlow/EmbeddingOps.hs - - - -
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE ConstraintKinds #-}
    -{-# LANGUAGE DataKinds #-}
    -{-# LANGUAGE NoMonomorphismRestriction #-}
    -{-# LANGUAGE OverloadedStrings #-}
    -{-# LANGUAGE RankNTypes #-}
    -
    --- | Parallel lookups on the list of tensors.
    -module TensorFlow.EmbeddingOps where
    -
    -import Control.Monad (zipWithM)
    -import Data.Int (Int32, Int64)
    -import TensorFlow.Build (Build, colocateWith, render)
    -import TensorFlow.Ops (shape, vector)  -- Also Num instance for Tensor
    -import TensorFlow.Tensor (Tensor, Value)
    -import TensorFlow.Types (OneOf, TensorType)
    -import qualified TensorFlow.GenOps.Core as CoreOps
    -
    --- | Looks up `ids` in a list of embedding tensors.
    ---
    --- This function is used to perform parallel lookups on the list of
    --- tensors in `params`.  It is a generalization of `TF.gather`, where
    --- `params` is interpreted as a partition of a larger embedding
    --- tensor.
    ---
    --- The partition_strategy is "mod", we assign each id to partition
    --- `p = id % len(params)`. For instance,
    --- 13 ids are split across 5 partitions as:
    --- `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
    ---
    --- The results of the lookup are concatenated into a dense
    --- tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
    -embeddingLookup :: forall a b v .
    -                   ( TensorType a
    -                   , OneOf '[Int64, Int32] b
    -                   , Num b
    -                   )
    -                => [Tensor v a]
    -                -- ^ A list of tensors which can be concatenated along
    -                -- dimension 0. Each `Tensor` must be appropriately
    -                -- sized for `mod` partition strategy.
    -                -> Tensor Value b
    -                -- ^ A `Tensor` with type `int32` or `int64`
    -                -- containing the ids to be looked up in `params`.
    -                -- The ids are required to have fewer than 2^31
    -                -- entries.
    -                -> Build (Tensor Value a)
    -                -- ^ A dense tensor with shape `shape(ids) + shape(params)[1:]`.
    -embeddingLookup [p0] ids = colocateWith p0 (render $ CoreOps.gather p0 ids)
    -embeddingLookup params@(p0 : _) ids = do
    -    -- Do np separate lookups, finding embeddings for plist[p] in params[p]
    -    partitionedResult <- zipWithM
    -                        (\p g -> colocateWith p $ render $ CoreOps.gather p g)
    -                        params gatherIds
    -    let unshapedResult = CoreOps.dynamicStitch pindices partitionedResult
    -    -- Shape restoration is not as optimal as it would be with client
    -    -- side shape tracking.
    -    paramShape <- colocateWith p0 (render (shape p0))
    -    let finalShape = CoreOps.concat 0 [shape ids, tailShape]
    -        tailShape = CoreOps.slice paramShape (singleton 1) (singleton (-1))
    -    render $ CoreOps.reshape unshapedResult finalShape
    -  where
    -    -- Avoids genericLength here which would be evaluated by TF.
    -    np = fromIntegral (length params)
    -    flatIds = CoreOps.reshape ids (singleton (-1))
    -    pAssignments = CoreOps.cast (flatIds `CoreOps.mod` np)
    -    newIds = flatIds `CoreOps.div` np
    -    originalIndices = CoreOps.range 0 (CoreOps.size flatIds) 1
    -    -- Partition list of ids based on assignments into np separate lists
    -    gatherIds = CoreOps.dynamicPartition np newIds pAssignments
    -    -- Similarly, partition the original indices.
    -    pindices = CoreOps.dynamicPartition np originalIndices pAssignments
    -    singleton i = vector [i :: Int32]
    -
    -embeddingLookup [] _ = error "embeddingLookup requires params to be non empty"
    -
    - diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Gradient.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Gradient.html deleted file mode 100644 index 8116566..0000000 --- a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Gradient.html +++ /dev/null @@ -1,727 +0,0 @@ - - - - - -src/TensorFlow/Gradient.hs - - - -
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE ConstraintKinds #-}
    -{-# LANGUAGE DataKinds #-}
    -{-# LANGUAGE FlexibleContexts #-}
    -{-# LANGUAGE OverloadedStrings #-}
    -{-# LANGUAGE RankNTypes #-}
    -{-# LANGUAGE ScopedTypeVariables #-}
    -{-# LANGUAGE TypeFamilies #-}
    -{-# LANGUAGE ViewPatterns #-}
    -
    -module TensorFlow.Gradient
    -    ( gradients
    -    ) where
    -
    -import Control.Monad (forM, zipWithM)
    -import Control.Monad.State.Strict (State, evalState, gets, modify)
    -import Data.ByteString (ByteString)
    -import Data.Complex (Complex)
    -import Data.Default (def)
    -import Data.Int (Int32, Int64)
    -import Data.List (foldl', sortBy)
    -import Data.Map.Strict (Map)
    -import Data.Maybe (fromMaybe, maybeToList, mapMaybe)
    -import Data.Ord (comparing)
    -import Data.ProtoLens.TextFormat (showMessage)
    -import Data.Set (Set)
    -import Data.Text (Text)
    -import Data.Tuple (swap)
    -import Lens.Family2 (Lens', (&), (^.), (.~), (%~))
    -import Lens.Family2.State.Strict (uses)
    -import Lens.Family2.Stock (at, intAt)
    -import Lens.Family2.Unchecked (lens, iso)
    -import Prelude hiding (sum)
    -import Text.Printf (printf)
    -import qualified Data.Graph.Inductive.Basic as FGL
    -import qualified Data.Graph.Inductive.Graph as FGL
    -import qualified Data.Graph.Inductive.PatriciaTree as FGL
    -import qualified Data.Graph.Inductive.Query.DFS as FGL
    -import qualified Data.IntMap.Strict as IntMap
    -import qualified Data.Map.Strict as Map
    -import qualified Data.Set as Set
    -import qualified Data.Text as Text
    -
    -import qualified TensorFlow.GenOps.Core as CoreOps
    -import TensorFlow.Build
    -    ( Build
    -    , render
    -    , renderNodeName
    -    , renderedNodeDefs
    -    , opDef
    -    , opAttr
    -    )
    -import TensorFlow.BuildOp
    -import TensorFlow.Ops
    -    ( addN
    -    , broadcastGradientArgs
    -    , expandDims
    -    , fill
    -    , matMul
    -    , reducedShape
    -    , reluGrad
    -    , reshape
    -    , scalar
    -    , shape
    -    , softmaxCrossEntropyWithLogits
    -    , sum
    -    , scalarize
    -    , vector
    -    , zerosLike
    -    )
    -import TensorFlow.Output
    -    ( NodeName(..)
    -    , Op (Rendered)
    -    , Output(..)
    -    , OutputIx(..)
    -    , outputIndex
    -    )
    -import TensorFlow.Tensor
    -    ( Tensor(..)
    -    , TensorKind (ValueKind)
    -    , Value
    -    , tensorOutput
    -    , tensorAttr
    -    )
    -import TensorFlow.Types (Attribute, OneOf, TensorType, attrLens)
    -import Proto.Tensorflow.Core.Framework.NodeDef
    -    (NodeDef, attr, input, op, name)
    -
    -type GradientCompatible a =
    -    -- TODO(fmayle): MaxPoolGrad doesn't support Double for some reason.
    -    (Num a, OneOf '[ Float, Complex Float, Complex Double ] a)
    -
    --- TODO(fmayle): Support control flow.
    --- TODO(fmayle): Support gate_gradients-like option to avoid race conditions.
    --- TODO(fmayle): Do we need to consider control inputs? See _PendingCount in
    --- tensorflow/python/ops/gradients.py.
    --- TODO(fmayle): Maybe store the gradient functions and numOutputs on the OpDef.
    -
    -
    --- | Gradient of @y@ w.r.t. each element of @xs@.
    -gradients :: forall a v1 v2 . ( Num (Tensor v1 a)
    -                                -- TODO(gnezdo): remove indirect constraint.
    -                               -- It's a wart inherited from Num instance.
    -                              , v1 ~ Value
    -                              , GradientCompatible a
    -                              )
    -          => Tensor v1 a  -- ^ The output of the graph.
    -          -> [Tensor v2 a]  -- ^ Tensors for which gradients are computed.
    -          -> Build [Tensor Value a]
    -gradients y xs = do
    -    -- The gradients are computed using "reverse accumulation", similarly to
    -    -- what is described here:
    -    -- https://en.wikipedia.org/wiki/Automatic_differentiation#The_chain_rule.2C_forward_and_reverse_accumulation
    -    --
    -    -- The code is summarised as follows:
    -    --
    -    -- 1. Create an fgl graph of the relevant nodes (ops) and edges (tensors).
    -    -- 2. Initialize the gradient of y to 1 (∂y/∂y = 1) and the rest of tensor's
    -    --    gradients to nothing.
    -    -- 3. Process the nodes in reverse topological order (i.e. each node comes
    -    --    after all of its outputs so that the output gradients for a node have
    -    --    been completely calculated before it is processed):
    -    --      a. Record the gradient for each of the node's output tensors (∂y/∂w
    -    --         for each output tensor w).
    -    --      b. Calculate the gradient of y w.r.t. each of the node's input
    -    --         tensors using the gradients of the node's output tensors.
    -    --
    -    --         Written differently, for each output tensor w and input tensor v:
    -    --           ∂y/∂w = ...            (calculated in previous steps)
    -    --           ∂w/∂v = ...            (op specific)
    -    --           ∂y/∂v = ∂y/∂w * ∂w/∂v  (technically, if tensor v is an input
    -    --                                   to multiple nodes, then this is only
    -    --                                   part of ∂y/∂v)
    -    --
    -    -- 4. Lookup the recorded gradient for each x in xs.
    -
    -    yName <- renderNodeName y
    -    -- TODO(fmayle): Move this into Build.hs and call it unsafeNodeDefFromName?
    -    nodeDefLookup :: (NodeName -> NodeDef) <- uses renderedNodeDefs $
    -        (\f x -> fromMaybe (error $ "no NodeDef found for " ++ show x) (f x))
    -        . flip Map.lookup
    -    let (gr, nodeMap) = createGraph yName nodeDefLookup
    -    -- Set gradient of y to one.
    -    let initPending :: Map.Map FGL.Node (PendingGradients a)
    -        initPending = Map.empty & at (nodeMap Map.! yName)
    -                                . nonEmpty
    -                                . outputIxAt (y ^. tensorOutput . outputIndex)
    -                                . nonEmpty
    -                                .~ [fill (shape y) (scalar 1)]
    -    -- Calculate the gradients of y w.r.t. each node in the graph.
    -    gradientMap <- graphGrads gr initPending
    -    -- Lookup the gradients for each x.
    -    forM xs $ \x -> do
    -        xName <- renderNodeName x
    -        render $ fromMaybe (zerosLike x) $ do
    -            n <- nodeMap ^. at xName
    -            let i = x ^. tensorOutput . outputIndex
    -            gradientMap ^. at n . nonEmpty . outputIxAt i
    -
    -outputIxAt :: OutputIx -> Lens' (IntMap.IntMap v) (Maybe v)
    -outputIxAt = intAt . unOutputIx
    -
    --- | Incomplete gradients of a node's outputs.
    ---
    --- The lists represent partial sums. The key is an OutputIx sans newtype.
    -type PendingGradients a = IntMap.IntMap [Tensor Value a]
    -
    --- | Gradients of a node's outputs. The key is an OutputIx sans newtype.
    -type Gradients a = IntMap.IntMap (Tensor Value a)
    -
    --- | Graph of TensorFlow operations.
    -type Graph = FGL.Gr NodeDef EdgeLabel
    -
    --- | Data associated with an edge.
    ---
    --- Pair of
    ---   1. Output index of a tensor from the source node.
    ---   2. Input index that the tensor connects to on the destination node.
    -type EdgeLabel = (OutputIx, OutputIx)
    -
    -
    --- | State used for calculating gradients.
    -data GradientsState a = GradientsState
    -                      { _gradientsPending :: !(Map FGL.Node (PendingGradients a))
    -                      , _gradientsResult  :: !(Map FGL.Node (Gradients a))
    -                      }
    -
    -gradientsPending :: Lens' (GradientsState a) (Map FGL.Node (PendingGradients a))
    -gradientsPending = lens _gradientsPending (\x y -> x { _gradientsPending = y })
    -
    -gradientsResult :: Lens' (GradientsState a) (Map FGL.Node (Gradients a))
    -gradientsResult = lens _gradientsResult (\x y -> x { _gradientsResult = y })
    -
    -
    --- TODO(fmayle): Use something like Data.List.Safe.
    --- | Safe version of (!!).
    -safeIndex :: [a] -> Int -> Maybe a
    -_      `safeIndex` n | n < 0 = Nothing
    -[]     `safeIndex` _         = Nothing
    -(x:_)  `safeIndex` 0         = Just x
    -(_:xs) `safeIndex` n         = xs `safeIndex` (n-1)
    -
    --- Copy of http://hackage.haskell.org/package/lens-3.9.0.2/docs/Control-Lens-Iso.html#v%3anon
    -anon :: a -> (a -> Bool) -> Lens' (Maybe a) a
    -anon a p = iso (fromMaybe a) go where
    -  go b | p b       = Nothing
    -       | otherwise = Just b
    -
    -non :: Eq a => a -> Lens' (Maybe a) a
    -non a = anon a (a==)
    -
    --- | Lens that defaults Nothing to mempty.
    -nonEmpty :: (Monoid (t v), Foldable t) => Lens' (Maybe (t v)) (t v)
    -nonEmpty = anon mempty null
    -
    --- | Calculate the gradients for every node in a graph.
    -graphGrads :: forall a. GradientCompatible a
    -           => Graph
    -           -> Map FGL.Node (PendingGradients a)
    -           -- ^ Initial gradients (usually just 1 for the node of interest).
    -           -> Build (Map FGL.Node (Gradients a))
    -graphGrads gr initPending = pure (foldl' go initState nodeOrder ^. gradientsResult)
    -  where
    -    initState = GradientsState initPending Map.empty
    -    -- Reverse topological sort.
    -    -- TODO(fmayle): Filter out nodes that are not successors of any x in xs to
    -    -- avoid calculating gradients that won't be used.
    -    nodeOrder = FGL.topsort $ FGL.grev gr
    -    go state node =
    -        -- Aggregate the accumulated gradients for this node.
    -        let outputGrads =
    -                sumPendingGradient (state ^. gradientsPending . at node . nonEmpty)
    -        in if null outputGrads
    -           then state
    -           else
    -              -- Calculate the gradients for each of the node's inputs.
    -              let nextState = state & gradientsResult %~ Map.insert node outputGrads
    -                  ctx = FGL.context gr node
    -              in updatePendingGradients
    -                 ctx
    -                 (calculateInputGrads ctx outputGrads gr)
    -                 nextState
    -
    --- | Reduce accumulated gradients for each output to one Tensor.
    -sumPendingGradient :: GradientCompatible a
    -                   => PendingGradients a -> Gradients a
    -sumPendingGradient = IntMap.mapMaybe f
    -  where
    -    f [] = Nothing
    -    f [x] = Just x
    -    f xs = Just (addN xs)
    -
    -
    --- | Calculate the gradients of a node's input tensors.
    ---
    --- This is mostly just a wrapper around opGrad.
    -calculateInputGrads :: forall a. GradientCompatible a
    -                    => FGL.Context NodeDef EdgeLabel
    -                    -> Gradients a  -- ^ Output gradients of the node.
    -                    -> Graph
    -                    -> [Maybe (Tensor Value a)]
    -calculateInputGrads (inputEdges, _, nodeDef, _) outputGrads gr =
    -    opGrad (nodeDef ^. op) nodeDef inputTensors fullOutGrads
    -  where
    -    fullOutGrads =
    -        fullOutputGrads (numOutputs nodeDef) (Rendered nodeDef) outputGrads
    -    -- Create a tensor from an edge (technically an Output, but it seems less
    -    -- confusing to refer to it as a tensor here).
    -    edgeToTensor :: (EdgeLabel, FGL.Node) -> Output
    -    edgeToTensor ((i, _), n) =
    -        case FGL.lab gr n of
    -            Just edgeNodeDef -> Output i (Rendered edgeNodeDef)
    -            Nothing -> error $ "calculateInputGrads: missing input node for "
    -                               ++ Text.unpack (nodeDef ^. name)
    -    -- Input tensors, sorted by input index.
    -    inputTensors = map edgeToTensor $ sortBy (comparing (snd . fst)) inputEdges
    -
    --- | Convert a Map of gradients to a list, with zeros for missing outputs.
    -fullOutputGrads :: (TensorType a, Num a)
    -                => OutputIx  -- ^ Number of outputs.
    -                -> Op
    -                -> Gradients a
    -                -> [Tensor Value a]
    -fullOutputGrads n o gs =
    -    map (\i -> fromMaybe (zero i) (gs ^. outputIxAt i)) [0..n-1]
    -  where
    -    -- A tensor of zeros with the same shape as the i'th output.
    -    zero i = zerosLike $ toT (Output i o)
    -
    -
    --- | Update the pending gradients of a node's inputs.
    -updatePendingGradients :: forall a. (TensorType a, Num a)
    -                       => FGL.Context NodeDef EdgeLabel
    -                       -> [Maybe (Tensor Value a)]
    -                       -- ^ Gradient of each input tensor.
    -                       -> GradientsState a
    -                       -> GradientsState a
    -updatePendingGradients (inputEdges, _, nodeDef, _) inputGrads initState =
    -    foldl' go initState inputEdges
    -  where
    -    go :: GradientsState a -> (EdgeLabel, FGL.Node) -> GradientsState a
    -    go state ((outIndex, OutputIx inIndex), node) =
    -        case maybeGradient of
    -            Nothing -> state
    -            Just g ->
    -                -- Add to the list of pending gradients for this tensor.
    -                state & gradientsPending
    -                      . at node
    -                      . nonEmpty
    -                      . outputIxAt outIndex
    -                      . nonEmpty
    -                      %~ (g:)
    -      where
    -        badSizeErr = error $ printf "updatePendingGradients: bad input index \
    -                                    \%d for inputGrads of length %d in %s"
    -                                    inIndex (length inputGrads)
    -                                    (show (nodeDef ^. name))
    -        maybeGradient = fromMaybe badSizeErr (safeIndex inputGrads inIndex)
    -
    -
    --- | Create a graph that includes a node and its transitive dependencies.
    -createGraph :: NodeName -> (NodeName -> NodeDef)
    -            -> (Graph, Map NodeName FGL.Node)
    -createGraph nodeName nodeDefLookup = (FGL.nmap nodeDefLookup graph, nodeMap)
    -  where
    -    -- Parse a tensor name.
    -    parseTensorName :: Text -> Maybe (NodeName, OutputIx)
    -    parseTensorName n
    -        | Text.null n        = error "parseTensorName: empty name"
    -        | Text.head n == '^' = Nothing  -- Control edge
    -        | otherwise          =
    -            let (nm, indexStr) = Text.breakOn ":" n
    -                index | Text.null indexStr = 0
    -                      | otherwise = read $ Text.unpack $ Text.tail indexStr
    -            in Just (NodeName nm, OutputIx index)
    -
    -    -- Build a map from node name to outward edges.
    -    --
    -    -- The state is the set of visited nodes.
    -    collect :: Maybe (NodeName, OutputIx, OutputIx)
    -            -> NodeName
    -            -> State (Set NodeName)
    -                     (Map NodeName [(NodeName, OutputIx, OutputIx)])
    -    collect outgoingEdge nm = do
    -        let nextLookup = Map.singleton nm (maybeToList outgoingEdge)
    -        seen <- gets (Set.member nm)
    -        modify (Set.insert nm)
    -        if seen
    -            then pure nextLookup
    -            else do
    -                let inputs = nodeDefLookup nm ^. input
    -                    recurse inIndex (parentName, outIndex) =
    -                        collect (Just (nm, outIndex, inIndex)) parentName
    -                subEdgeLookups <-
    -                    zipWithM recurse [0..] $ mapMaybe parseTensorName inputs
    -                pure $ Map.unionsWith (++) (nextLookup:subEdgeLookups)
    -
    -    edgeLookup = evalState (collect Nothing nodeName) Set.empty
    -    -- Associate an ID with each node name.
    -    nodeMap = Map.fromList $ zip (Map.keys edgeLookup) [0..]
    -    -- Create the graph.
    -    graph = FGL.mkGraph (swap <$> Map.toList nodeMap)
    -                        [ (nodeMap Map.! n, nodeMap Map.! m, (i, j))
    -                        | (n, edges) <- Map.toList edgeLookup
    -                        , (m, i, j) <- edges
    -                        ]
    -
    --- | Function to compute the gradient of y w.r.t. each input.
    ---
    --- Let y be an arbitrary tensor
    --- and [w_0, ..., w_n] be the output tensors of a node
    --- and [v_0, ..., v_n] be the input tensors of the same node.
    ---
    --- Given [∂y/∂w_0, ..., ∂y/∂w_n] and [v_0, ..., v_n], a GradientFunc computes
    --- [∂y/∂v_0, ..., ∂y/∂v_n] for a particular op type.
    ---
    --- A Nothing gradient is equivalent to zero (but allows for short circuiting
    --- computation when all the gradients for something are Nothing).
    -type GradientFunc a = NodeDef
    -                    -> [Output]
    -                    -- ^ Input tensors.
    -                    -> [Tensor Value a]
    -                    -- ^ Gradient of y w.r.t. each output tensor.
    -                    -> [Maybe (Tensor Value a)]
    -                    -- ^ Gradient of y w.r.t. each input tensor.
    -
    -
    --- TODO(fmayle): Assert the type is correct.
    --- | Create a Tensor from an Output.
    -toT :: Output -> Tensor Value a
    -toT = Tensor ValueKind
    -
    -
    --- | Wrapper around `TensorFlow.GenOps.Core.slice` that builds vectors from scalars for
    --- simple slicing operations.
    -flatSlice :: forall v1 t . (TensorType t)
    -         => Tensor v1 t    -- ^ __input__
    -         -> Int32          -- ^ __begin__: specifies the offset into the first dimension of
    -                           -- 'input' to slice from.
    -         -> Int32          -- ^ __size__: specifies the number of elements of the first dimension
    -                           -- of 'input' to slice. If size is -1, all remaining elements in the dimension
    -                           -- are included in the slice (i.e. this is equivalent to setting
    -                           -- size = input.dim_size(0) - begin).
    -         -> Tensor Value t -- ^ __output__
    -flatSlice t begin size = CoreOps.slice t (vector [begin]) (vector [size])
    -
    -
    --- | The gradient function for an op type.
    ---
    --- These implementations should match their python counterparts in:
    --- third_party/tensorflow/python/ops/*_grad.py
    -opGrad :: forall a . GradientCompatible a => Text -> GradientFunc a
    -
    -opGrad "Abs" _ [toT -> x] [dz] = [Just $ dz * signum x]
    -opGrad "Neg" _ [_] [dz] = [Just $ -dz]
    -opGrad "Relu" _ [toT -> x] [dz] = [Just $ reluGrad dz x]
    -
    -opGrad "Square" _ [toT -> x] [dz] =
    -    -- TODO(fmayle): Handle complex numbers.
    -    -- TODO(fmayle): The python code makes dz a control dependency of the 2*x
    -    -- (for performance reasons?). Will need to put these functions in the Build
    -    -- monad to replicate that.
    -    [Just $ dz * (2 * x)]
    -
    -opGrad "Gather" _ [toT -> x, toT -> indices] [dz] =
    -    -- TODO(fmayle): The python version uses a better performance implementation
    -    -- when the shape is known without having to run the graph.
    -    -- TODO(fmayle): We shouldn't convert the result to a dense tensor. Sparse
    -    -- tensor support will require some thinking.
    -    [ Just $ CoreOps.unsortedSegmentSum values indices' numRows
    -    , Nothing
    -    ]
    -  where
    -    -- TODO(gnezdo): Use colocateWith but it requires Build monad.
    -    denseShape = shape (x :: Tensor Value a)
    -    numRows = scalarize $ flatSlice denseShape 0 1
    -    valuesShape = CoreOps.concat 0 [ allDimensions
    -                                   , flatSlice denseShape 1 (-1)
    -                                   ]
    -    values = reshape dz valuesShape
    -    -- TODO(fmayle): This could be either Int32 or Int64.
    -    indices' = reshape indices allDimensions :: Tensor Value Int32
    -
    -opGrad "Max" _ [toT -> x, toT -> indices] [dz] =
    -    [Just $ indicators `CoreOps.div` numSelected * dz', Nothing]
    -  where
    -    sx = shape (x :: Tensor Value a)
    -    outputShapeKeptDims = reducedShape sx (indices :: Tensor Value Int32)
    -    x' = reshape x outputShapeKeptDims
    -    dz' = reshape dz outputShapeKeptDims
    -    indicators = CoreOps.cast $ CoreOps.equal x' x
    -    numSelected = reshape (sum indicators indices) outputShapeKeptDims
    -
    --- Min and Max have identical gradient implementations.
    -opGrad "Min" u v w = opGrad "Max" u v w
    -
    -opGrad "Sum" _ [toT -> x, toT -> indices] [dz] =
    -    [ Just $ CoreOps.tile grad tileScaling, Nothing ]
    -  where
    -    -- TODO(gnezdo): Implement the fast-path from math_grad._SumGrad.
    -    sx = shape (x :: Tensor Value a)
    -    outputShapeKeptDims = reducedShape sx (indices :: Tensor Value Int32)
    -    tileScaling = safeShapeDiv sx outputShapeKeptDims
    -    grad = reshape dz outputShapeKeptDims
    -
    -opGrad "Mean" u v@[toT -> x, _] w =
    -    [Just $ dz `CoreOps.div` CoreOps.cast factor, Nothing]
    -  where
    -    [Just dz, Nothing] = opGrad "Sum" u v w
    -    inputShape = shape (x :: Tensor Value a)
    -    outputShape = shape (dz :: Tensor Value a)
    -    -- TODO(fmayle): Add fast path when shape is known.
    -    inputSize = CoreOps.prod inputShape $ rangeOfRank inputShape
    -    outputSize = CoreOps.prod outputShape $ rangeOfRank outputShape
    -    factor = safeShapeDiv inputSize outputSize
    -
    -opGrad "Add" _ [toT -> x, toT -> y] [dz] =
    -    [ Just $ reshape (sum dz rx) sx
    -    , Just $ reshape (sum dz ry) sy ]
    -  where
    -    sx = shape (x :: Tensor Value a)
    -    sy = shape (y :: Tensor Value a)
    -    (rx, ry) = broadcastGradientArgs sx sy
    -
    -opGrad "Sub" u v w =
    -    [Just x, Just (-y)]
    -  where
    -    [Just x, Just y] = opGrad "Add" u v w
    -
    -opGrad "SoftmaxCrossEntropyWithLogits" _ [toT -> x, toT -> y] [dz, _] =
    -    [ Just $ expandDims dz (-1) * snd (softmaxCrossEntropyWithLogits x y)
    -    , Nothing ]
    -
    -opGrad "Mul" _ [toT -> x, toT -> y] [dz] =
    -    -- TODO(fmayle): Handle complex numbers.
    -    [ Just $ reshape (sum (dz * y) rx) sx
    -    , Just $ reshape (sum (x * dz) ry) sy ]
    -  where
    -    sx = shape (x :: Tensor Value a)
    -    sy = shape (y :: Tensor Value a)
    -    (rx, ry) = broadcastGradientArgs sx sy
    -
    -opGrad "Div" _ [toT -> x, toT -> y] [dz] =
    -    -- TODO(fmayle): Handle complex numbers.
    -    -- TODO(gnezdo): Provide Fractional instance and use '/' instead of div.
    -    [ Just $ reshape (sum (dz `CoreOps.div` y) rx) sx
    -    , Just $ reshape (sum (dz * (negate x `CoreOps.div` (y * y))) ry) sy
    -    ]
    -  where
    -    sx = shape (x :: Tensor Value a)
    -    sy = shape (y :: Tensor Value a)
    -    (rx, ry) = broadcastGradientArgs sx sy
    -
    -opGrad "MatMul" nodeDef [toT -> x, toT -> y] [dz] =
    -    let transposeA = lookupAttr nodeDef "transpose_a"
    -        transposeB = lookupAttr nodeDef "transpose_b"
    -        transAttrs a b =
    -            (tensorAttr "transpose_a" .~ a) . (tensorAttr "transpose_b" .~ b)
    -    in case (transposeA, transposeB) of
    -       (False, False) ->
    -           [ Just $ (dz `matMul` y) & transAttrs False True
    -           , Just $ (x `matMul` dz) & transAttrs True False ]
    -       (False, True) ->
    -           [ Just $ dz `matMul` y
    -           , Just $ (x `matMul` dz) & transAttrs True False ]
    -       (True, False) ->
    -           [ Just $ (dz `matMul` y) & transAttrs False True
    -           , Just $ x `matMul` dz ]
    -       (True, True) ->
    -           [ Just $ (dz `matMul` y) & transAttrs True True
    -           , Just $ (x `matMul` dz) & transAttrs True True ]
    -
    -opGrad "Transpose" _ [_, toT -> p] [dz] =
    -    [ Just $ CoreOps.transpose dz
    -            (CoreOps.invertPermutation p :: Tensor Value Int32)
    -    , Nothing
    -    ]
    -
    -opGrad "Conv2D" nodeDef [toT -> x, toT -> y] [dz] =
    -    [ Just $ CoreOps.conv2DBackpropInput (shape x) y dz
    -          & tensorAttr "strides" .~ strides
    -          & tensorAttr "padding" .~ padding
    -          & tensorAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu
    -          & tensorAttr "data_format" .~ dataFormat
    -    , Just $ CoreOps.conv2DBackpropFilter x (shape y) dz
    -          & tensorAttr "strides" .~ strides
    -          & tensorAttr "padding" .~ padding
    -          & tensorAttr "use_cudnn_on_gpu" .~ useCudnnOnGpu
    -          & tensorAttr "data_format" .~ dataFormat
    -    ]
    -  where
    -    strides = lookupAttr nodeDef "strides" :: [Int64]
    -    padding = lookupAttr nodeDef "padding" :: ByteString
    -    useCudnnOnGpu = lookupAttr nodeDef "use_cudnn_on_gpu" :: Bool
    -    dataFormat = lookupAttr nodeDef "data_format" :: ByteString
    -
    -opGrad "MaxPool" nodeDef [toT -> x] [dz] =
    -    [ Just $ CoreOps.maxPoolGrad x output dz
    -          & tensorAttr "ksize" .~ ksize
    -          & tensorAttr "strides" .~ strides
    -          & tensorAttr "padding" .~ padding
    -          & tensorAttr "data_format" .~ dataFormat
    -    ]
    -  where
    -    output :: Tensor Value a
    -    output = toT $ Output 0 (Rendered nodeDef)
    -    ksize = lookupAttr nodeDef "ksize" :: [Int64]
    -    strides = lookupAttr nodeDef "strides" :: [Int64]
    -    padding = lookupAttr nodeDef "padding" :: ByteString
    -    dataFormat = lookupAttr nodeDef "data_format" :: ByteString
    -
    -opGrad "Reshape" _ [toT -> x, _] [dz] =
    -    [Just $ reshape dz $ shape (x :: Tensor Value a), Nothing]
    -
    -opGrad "OneHot" _ _ _ = [Nothing, Nothing, Nothing, Nothing]
    -opGrad "TruncatedNormal" _ _ _ = [Nothing]
    -
    -opGrad "RefIdentity" _ _ [dz] = [Just dz]
    -opGrad "Cast" nodeDef _ [dz] = [Just reverseCast]
    -  where
    -    -- TODO(gnezdo): too permissive, python only allows float types as src_type.
    -    reverseCast =
    -        buildOp (opDef "Cast"
    -                 & opAttr "DstT" .~ (lookupAttr nodeDef "SrcT" :: ByteString)
    -                 & opAttr "SrcT" .~ (lookupAttr nodeDef "DstT" :: ByteString))
    -        dz
    -
    -opGrad "DynamicStitch" nodeDef inputs [dz] =
    -    replicate halfLen Nothing ++ valuesGrads
    -  where
    -    halfLen =
    -        let len = length inputs
    -            half = len `div` 2
    -        in if 2 * half == len
    -           then half
    -           else error ("Uneven input size " ++ show (len, showMessage nodeDef))
    -    valuesGrads = [ Just $ CoreOps.gather dz (toT idx :: Tensor Value Int32)
    -                  | idx <- take halfLen inputs
    -                  ]
    -
    -opGrad "DynamicPartition" nodeDef [toT -> xs, toT -> indices] dz =
    -    [ Just reconstructed, Nothing ]
    -  where
    -    reconstructed = CoreOps.reshape stitched
    -                    (CoreOps.shape (xs :: Tensor Value a) :: Tensor Value Int32)
    -    stitched = CoreOps.dynamicStitch partitionedIndices dz
    -    partitionedIndices = CoreOps.dynamicPartition np originalIndices indices
    -    np = lookupAttr nodeDef "num_partitions" :: Int64
    -    originalIndices =
    -        CoreOps.reshape (CoreOps.range 0 (CoreOps.size indices) 1) prefixShape
    -    prefixShape = shapeInt32 indices
    -    shapeInt32 = CoreOps.shape :: Tensor Value Int32 -> Tensor Value Int32
    -
    -opGrad "Select" _ [toT -> c, toT -> x, _] [dz] =
    -    [ Nothing
    -    , Just $ CoreOps.select c dz zeros
    -    , Just $ CoreOps.select c zeros dz
    -    ]
    -  where zeros = CoreOps.zerosLike x
    -
    --- TODO(gnezdo): Unlike Python, no control dependency on dz.
    -opGrad "Log" _ [toT -> x] [dz] = [ Just $ dz * CoreOps.inv x ]
    --- TODO(gnezdo): Reuse the output instead of doing another exp,
    --- though it is probably CSE'd away anyway.
    -opGrad "Exp" _ [toT -> x] [dz] = [ Just $ dz * CoreOps.exp x ]
    -opGrad "SparseSegmentSum" _ [toT -> x, toT -> y, toT -> t] [dz] =
    -    [ Just $ CoreOps.unsortedSegmentSum
    -             (CoreOps.gather dz (t :: Tensor Value Int32))
    -             (y :: Tensor Value Int32) inputRows
    -    , Nothing
    -    , Nothing
    -    ]
    -  where inputRows = flatSlice (shape (x :: Tensor Value a)) 0 1
    -
    -opGrad "LabelClasses" _ _ _ = [Nothing, Nothing]
    -opGrad "LabelWeights" _ _ _ = [Nothing]
    -opGrad "Size" _ _ _ = [Nothing]
    -opGrad "ZerosLike" _ _ _ = [Nothing]
    -
    --- TODO(fmayle): These can go away if we properly prune the graph.
    -opGrad "Const" _ _ _ = [Nothing, Nothing]
    -opGrad "Placeholder" _ _ _ = []
    -opGrad "Variable" _ _ _ = []
    -
    -opGrad n nodeDef ins grads =
    -    error $ "no gradient implemented for " ++
    -            show (n, length ins, length grads, showMessage nodeDef, ins)
    -
    --- | The number of outputs for an op type.
    -numOutputs :: NodeDef -> OutputIx
    -numOutputs o =
    -    case o ^. op of
    -        "Abs" -> 1
    -        "Add" -> 1
    -        "Cast" -> 1
    -        "Const" -> 1
    -        "Conv2D" -> 1
    -        "Div" -> 1
    -        "DynamicStitch" -> 1
    -        "DynamicPartition" ->
    -            fromIntegral (lookupAttr o "num_partitions" :: Int64)
    -        "Exp" -> 1
    -        "Gather" -> 1
    -        "LabelClasses" -> 1
    -        "LabelWeights" -> 1
    -        "Log" -> 1
    -        "MatMul" -> 1
    -        "Max" -> 1
    -        "MaxPool" -> 1
    -        "Mean" -> 1
    -        "Min" -> 1
    -        "Mul" -> 1
    -        "Neg" -> 1
    -        "Placeholder" -> 1
    -        "OneHot" -> 1
    -        "RefIdentity" -> 1
    -        "Relu" -> 1
    -        "Reshape" -> 1
    -        "Select" -> 1
    -        "Size" -> 1
    -        "SoftmaxCrossEntropyWithLogits" -> 2
    -        "Square" -> 1
    -        "SparseSegmentSum" -> 1
    -        "Sub" -> 1
    -        "Sum" -> 1
    -        "Transpose" -> 1
    -        "TruncatedNormal" -> 1
    -        "Variable" -> 1
    -        "ZerosLike" -> 1
    -        _ -> error $ "numOutputs not implemented for " ++ show (o ^. op)
    -
    --- Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`
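    --- (e.g. safeShapeDiv (vector [6, 0]) (vector [3, 0]) is [2, 0]: the zero
    --- denominator is clamped to 1 before dividing).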
    -safeShapeDiv :: Tensor v1 Int32 -> Tensor v2 Int32 -> Tensor Value Int32
    -safeShapeDiv x y = x `CoreOps.div` (CoreOps.maximum y 1)
    -
    -allDimensions :: Tensor Value Int32
    -allDimensions = vector [-1 :: Int32]
    -
    -rangeOfRank :: forall v1 t. TensorType t => Tensor v1 t -> Tensor Value Int32
    -rangeOfRank x = CoreOps.range 0 (CoreOps.rank x) 1
    -
    -lookupAttr :: Attribute a1 => NodeDef -> Text -> a1
    -lookupAttr nodeDef attrName = nodeDef ^. attr . at attrName . non def . attrLens
    -
    - diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Ops.html b/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Ops.html deleted file mode 100644 index 52dde45..0000000 --- a/docs/haddock/tensorflow-ops-0.1.0.0/src/TensorFlow-Ops.html +++ /dev/null @@ -1,307 +0,0 @@ -src/TensorFlow/Ops.hs
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    --- | This module contains definitions for some built-in TensorFlow operations.
    ---
    --- Note that certain "stateful" ops like 'variable' and 'assign' return a
    --- 'Build' action (e.g., @Build (Tensor Ref a)@) instead of a pure value; the
    --- returned 'Tensor's are always rendered in the current 'Build' context.  This
    --- approach helps us avoid problems with inlining or common subexpression
    --- elimination, by writing
    ---
    --- > do
    --- >     v <- variable []
    --- >     w <- assign v 3
    --- >     render $ w * w
    ---
    --- instead of
    ---
    --- > let
    --- >    v = variable []
    --- >    w = assign v 3
    --- > in w * w
    ---
    --- since the latter could reasonably be transformed by the compiler into
    --- (or from)
    ---
    --- > let
    --- >    v = variable []
    --- >    w = assign v 3
    --- >    w' = assign v 3
    --- > in w * w'
    ---
    --- Ops should return a 'Build' action if their original 'OpDef' marks them as
    --- stateful, or if they take any Refs as input.  (This mirrors the rules that
    --- TensorFlow uses to avoid common subexpression elimination.)
    -{-# LANGUAGE ConstraintKinds #-}
    -{-# LANGUAGE DataKinds #-}
    -{-# LANGUAGE FlexibleInstances #-}
    -{-# LANGUAGE OverloadedLists #-}
    -{-# LANGUAGE OverloadedStrings #-}
    -{-# LANGUAGE RankNTypes #-}
    -{-# LANGUAGE ScopedTypeVariables #-}
    -{-# LANGUAGE TypeFamilies #-}
    -{-# LANGUAGE UndecidableInstances #-}
    -{-# OPTIONS_GHC -fno-warn-orphans #-}
    -
    -module TensorFlow.Ops
    -    ( CoreOps.add
    -    , CoreOps.abs
    -    , CoreOps.addN
    -    , CoreOps.argMax
    -    , CoreOps.assign
    -    , CoreOps.broadcastGradientArgs
    -    , CoreOps.cast
    -    , CoreOps.concat
    -    , constant
    -    , CoreOps.equal
    -    , expandDims
    -    , initializedVariable
    -    , zeroInitializedVariable
    -    , CoreOps.fill
    -    , CoreOps.oneHot
    -    , CoreOps.matMul
    -    , matTranspose
    -    , CoreOps.mean
    -    , CoreOps.mul
    -    , CoreOps.neg
    -    , CoreOps.pack
    -    , placeholder
    -    , CoreOps.range
    -    , reducedShape
    -    , CoreOps.relu
    -    , CoreOps.reluGrad
    -    , CoreOps.reshape
    -    , restore
    -    , restoreFromName
    -    , save
    -    , scalar
    -    , shape
    -    , CoreOps.sign
    -    , CoreOps.size
    -    , CoreOps.softmax
    -    , CoreOps.softmaxCrossEntropyWithLogits
    -    , CoreOps.sparseToDense
    -    , CoreOps.sub
    -    , CoreOps.sum
    -    , CoreOps.transpose
    -    , truncatedNormal
    -    , CoreOps.variable
    -    , vector
    -    , zeros
    -    , CoreOps.zerosLike
    -    , scalarize
    -    ) where
    -
    -import Data.ByteString (ByteString)
    -import Data.Complex (Complex)
    -import Data.Int (Int32, Int64)
    -import Prelude hiding (abs, sum, concat)
    -import Data.ProtoLens (def)
    -import Data.Text.Encoding (encodeUtf8)
    -import Lens.Family2 ((.~), (&))
    -import Text.Printf (printf)
    -import Proto.Tensorflow.Core.Framework.Tensor
    -    ( TensorProto
    -    , dtype
    -    , tensorShape
    -    )
    -import qualified Proto.Tensorflow.Core.Framework.TensorShape
    -  as TensorShape
    -import TensorFlow.Build
    -import TensorFlow.BuildOp
    -import TensorFlow.ControlFlow (group)
    -import TensorFlow.Output (unNodeName)
    -import TensorFlow.Tensor
    -import TensorFlow.Types
    -
    -import qualified TensorFlow.GenOps.Core as CoreOps
    -
    -import qualified Prelude (abs)
    -
    --- TODO: Look into hs-boot refactoring to allow mutually recursive imports.
    --- | Must be defined as an orphan because of the dependency order between Ops
    --- and Tensor.
    ---
    --- The indirect constraint "v ~ Value" helps disambiguate types, for example in
    --- "neg 1 :: Tensor Value Float", it helps find the type of the subexpression
    --- "1".
    -instance ( TensorType a
    -         , Num a
    -         , v ~ Value
    -         , OneOf '[ Double, Float, Int32, Int64
    -                  , Complex Float, Complex Double] a) => Num (Tensor v a) where
    -    (+) = CoreOps.add
    -    (*) = CoreOps.mul
    -    (-) = CoreOps.sub
    -    abs = CoreOps.abs
    -    fromInteger = scalar . fromInteger
    -    signum = CoreOps.sign
    -    negate = CoreOps.neg
    -
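    --- | Transposes a matrix (swaps its two dimensions).
    ---
    --- For example (a sketch; values shown in row-major order):
    ---
    --- > matTranspose (constant [2, 3] [1..6 :: Float])
    ---
    --- is a 3x2 tensor holding [[1, 4], [2, 5], [3, 6]].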
    -matTranspose :: forall a v . TensorType a
    -             => Tensor v a -> Tensor Value a
    -matTranspose = flip CoreOps.transpose (vector [1, 0 :: Int32])
    -
    -placeholder :: forall a . TensorType a => Shape -> Build (Tensor Value a)
    -placeholder shape' =
    -    buildOp $ opDef "Placeholder"
    -            & opAttr "dtype" .~ tensorType (undefined :: a)
    -            & opAttr "shape" .~ shape'
    -
    --- | Creates a variable initialized to the given value.
    --- Initialization happens the next time the session runs.
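    ---
    --- For example (a sketch, inside 'Build'):
    ---
    --- > w <- initializedVariable (scalar (3 :: Float))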
    -initializedVariable :: forall a . TensorType a
    -                    => Tensor Value a -> Build (Tensor Ref a)
    -initializedVariable initializer = do
    -    v <- CoreOps.variable []  -- The shape is not known initially.
    -    (i :: Tensor Ref a) <-
    -        buildOp (opDef "Assign"
    -                 & opAttr "T" .~ tensorType (undefined :: a)
    -                 & opAttr "use_locking" .~ True
    -                 & opAttr "validate_shape" .~ False
    -                 )
    -        v initializer
    -    addInitializer =<< group i
    -    return v
    -
    --- | Creates a zero-initialized variable with the given shape.
    -zeroInitializedVariable
    -  :: (TensorType a, Num a) =>
    -     TensorFlow.Types.Shape -> Build (Tensor TensorFlow.Tensor.Ref a)
    -zeroInitializedVariable = initializedVariable . zeros
    -
    --- TODO: Support heterogeneous list of tensors.
    -save :: forall a v . TensorType a
    -        => ByteString     -- ^ File path.
    -        -> [Tensor v a]  -- ^ Tensors to save.
    -        -> Build ControlNode
    -save path xs = do
    -    let toByteStringTensor = scalar . encodeUtf8 . unNodeName
    -    names <- mapM (fmap toByteStringTensor . renderNodeName) xs
    -    let types = replicate (length xs) (tensorType (undefined :: a))
    -    let saveOp = buildOp $ opDef "Save"
    -                         & opAttr "T" .~ types
    -    saveOp (scalar path) (CoreOps.pack names) xs
    -
    --- | Restore a tensor's value from a checkpoint file.
    ---
    --- This version allows restoring from a checkpoint file that uses a different
    --- tensor name than the variable.
    -restoreFromName :: forall a . TensorType a
    -                => ByteString    -- ^ File path.
    -                -> ByteString    -- ^ Tensor name override.
    -                -> Tensor Ref a  -- ^ Tensor to restore.
    -                -> Build ControlNode
    -restoreFromName path name x = do
    -    let restoreOp = buildOp $ opDef "Restore"
    -                            & opAttr "dt" .~ tensorType (undefined :: a)
    -    group =<< CoreOps.assign x
    -                (restoreOp (scalar path) (scalar name) :: Tensor Value a)
    -
    --- | Restore a tensor's value from a checkpoint file.
    -restore :: forall a . TensorType a
    -        => ByteString    -- ^ File path.
    -        -> Tensor Ref a  -- ^ Tensor to restore.
    -        -> Build ControlNode
    -restore path x = do
    -    name <- encodeUtf8 . unNodeName <$> renderNodeName x
    -    restoreFromName path name x
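    -
    --- For example (a sketch with a hypothetical checkpoint path, inside 'Build'):
    ---
    --- > v <- CoreOps.variable [2] :: Build (Tensor Ref Float)
    --- > _ <- save "/tmp/ckpt" [v]
    --- > _ <- restore "/tmp/ckpt" v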
    -
    --- | Create a constant tensor.
    ---
    --- The values should be in row-major order, e.g.,
    ---
    ---   element 0:   index (0, ..., 0)
    ---   element 1:   index (0, ..., 1)
    ---   ...
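    ---
    --- For example (a sketch):
    ---
    --- > constant [2, 2] [1, 2, 3, 4 :: Float]
    ---
    --- holds [[1, 2], [3, 4]].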
    -constant :: forall a . TensorType a => Shape -> [a] -> Tensor Value a
    -constant (Shape shape') values
    -    | invalidLength = error invalidLengthMsg
    -    | otherwise = buildOp $ opDef "Const"
    -                          & opAttr "value" .~ typedNode
    -                          & opAttr "dtype" .~ nodeType
    -  where
    -    invalidLength = product shape' /= fromIntegral (length values)
    -    invalidLengthMsg = printf "invalid tensor length: expected %d got %d"
    -                              (product shape')
    -                              (length values)
    -    nodeType = tensorType (undefined :: a)
    -    typedNode :: TensorProto
    -    typedNode = def
    -                & dtype .~ nodeType
    -                & tensorShape.TensorShape.dim .~
    -                      [def & TensorShape.size .~ x | x <- shape']
    -                & tensorVal .~ values
    -
    --- | Reshape an N-D tensor down to a scalar.
    --- 
    --- See `TensorFlow.GenOps.Core.reshape`.
    -scalarize :: (TensorType a) => Tensor v a -> Tensor Value a
    -scalarize t = CoreOps.reshape t (vector scalarShape)
    -    where
    -        scalarShape = [] :: [Int32]
    -
    -
    --- | Create a constant vector.
    -vector :: TensorType a => [a] -> Tensor Value a
    -vector xs = constant [fromIntegral $ length xs] xs
    -
    --- | Create a constant scalar.
    -scalar :: forall a . TensorType a => a -> Tensor Value a
    -scalar x = constant [] [x]
    -
    --- | Random tensor from the unit normal distribution with bounded values.
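    ---
    --- For example (a sketch, inside 'Build'):
    ---
    --- > t <- truncatedNormal (vector [2, 2 :: Int64]) :: Build (Tensor Value Float)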
    -truncatedNormal :: forall a v . TensorType a
    -                => Tensor v Int64  -- ^ Shape.
    -                -> Build (Tensor Value a)
    -truncatedNormal = buildOp $ opDef "TruncatedNormal"
    -                          & opAttr "dtype" .~ tensorType (undefined :: a)
    -                          & opAttr "T" .~ tensorType (undefined :: Int64)
    -
    -zeros :: forall a . (Num a, TensorType a) => Shape -> Tensor Value a
    -zeros (Shape shape') = CoreOps.fill (vector $ map fromIntegral shape') (scalar 0)
    -
    -shape :: (TensorType t) => Tensor v1 t -> Tensor Value Int32
    -shape = CoreOps.shape
    -
    -expandDims :: (TensorType t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t
    -expandDims = CoreOps.expandDims
    -
    --- | Helper function for reduction ops (translation of math_ops.reduced_shape).
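    ---
    --- For example, reducedShape (vector [2, 3, 5, 7]) (vector [1, 2 :: Int32])
    --- evaluates to [2, 1, 1, 7] (the values traced in the comments below).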
    -reducedShape :: (OneOf '[ Int32, Int64 ] t1, OneOf '[ Int32, Int64 ] t2) =>
    -                Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32
    -reducedShape inputShape axes =
    -    let inputShape32 = toInt32 inputShape         -- [2, 3, 5, 7]
    -        axes32 = toInt32 axes                     -- [1, 2]
    -        toInt32 x = CoreOps.cast x :: Tensor Value Int32
    -        inputRank = CoreOps.size inputShape32     -- 4
    -        axesMod = (axes32 + inputRank) `CoreOps.mod` inputRank
    -        axesShape = shape axesMod                 -- [2]
    -    in CoreOps.dynamicStitch                      -- [2, 1, 1, 7]
    -         [CoreOps.range 0 inputRank 1,            -- [0, 1, 2, 3]
    -           axesMod]                               -- [1, 2]
    -         [inputShape32,                           -- [2, 3, 5, 7]
    -           CoreOps.fill axesShape 1]              -- [1, 1]
    -
    - diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-ops-0.1.0.0/src/hscolour.css deleted file mode 100644 index c15919e..0000000 --- a/docs/haddock/tensorflow-ops-0.1.0.0/src/hscolour.css +++ /dev/null @@ -1,5 +0,0 @@ -.hs-keyglyph, .hs-layout {color: red;} -.hs-keyword {color: blue;} -.hs-comment, .hs-comment a {color: green;} -.hs-str, .hs-chr {color: teal;} -.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt b/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt index caff083..ecf2a9a 100644 --- a/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt +++ b/docs/haddock/tensorflow-ops-0.1.0.0/tensorflow-ops.txt @@ -58,7 +58,8 @@ module TensorFlow.Ops --
  • NOTE*: Add supports broadcasting. AddN does not. -- More about broadcasting here
  • -- -add :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +add :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +add' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *)))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the absolute value of a tensor. -- @@ -66,40 +67,48 @@ add :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) -- the absolute value of each element in x. For example, if x is -- an input element and y is an output element, this operation computes -- \(y = |x|\). -abs :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t) => Tensor v1 t -> Tensor Value t +abs :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t => Tensor v'1 t -> Tensor Build t +abs' :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))) t => OpParams -> Tensor v'1 t -> Tensor Build t -- | Add all input tensors element wise. -addN :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t) => [Tensor v1 t] -> Tensor Value t +addN :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => [Tensor v'1 t] -> Tensor Build t +addN' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => OpParams -> [Tensor v'1 t] -> Tensor Build t -- | Returns the index with the largest value across dimensions of a -- tensor. -argMax :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value Int64 +argMax :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 +argMax' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build Int64 -- | Update ref by assigning value to it. -- -- This operation outputs "ref" after the assignment is done. This makes -- it easier to chain operations that need to use the reset value. 
-assign :: TensorType t => Tensor Ref t -> Tensor v2 t -> Build (Tensor Ref t) +assign :: (MonadBuild m', TensorType t) => Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) +assign' :: (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -> Tensor v'2 t -> m' (Tensor Ref t) -- | Return the reduction indices for computing gradients of s0 op s1 with -- broadcast. -- -- This is typically used by gradient computations for a broadcasting -- operation. -broadcastGradientArgs :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) +broadcastGradientArgs :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) +broadcastGradientArgs' :: OneOf ((:) * Int32 ((:) * Int64 ([] *))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) -- | Cast x of type SrcT to y of DstT. -cast :: (TensorType srcT, TensorType dstT) => Tensor v1 srcT -> Tensor Value dstT +cast :: (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -> Tensor Build dstT +cast' :: (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -> Tensor Build dstT -- | Concatenates tensors along one dimension. -concat :: TensorType t => Tensor v1 Int32 -> [Tensor v2 t] -> Tensor Value t +concat :: TensorType t => Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t +concat' :: TensorType t => OpParams -> Tensor v'1 Int32 -> [Tensor v'2 t] -> Tensor Build t -- | Create a constant tensor. -- -- The values should be in row major order, e.g., -- -- element 0: index (0, ..., 0) element 1: index (0, ..., 1) ... -constant :: TensorType a => Shape -> [a] -> Tensor Value a +constant :: TensorType a => Shape -> [a] -> Tensor Build a +constant' :: TensorType a => OpParams -> Shape -> [a] -> Tensor Build a -- | Returns the truth value of (x == y) element-wise. -- @@ -107,15 +116,19 @@ constant :: TensorType a => Shape -> [a] -> Tensor Value a --
  • NOTE*: Equal supports broadcasting. More about -- broadcasting here
  • -- -equal :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value Bool -expandDims :: (TensorType t) => Tensor v1 t -> Tensor v2 Int32 -> Tensor Value t +equal :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +equal' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Bool ((:) * ByteString ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build Bool +expandDims :: TensorType t => Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t +expandDims' :: TensorType t => OpParams -> Tensor v1 t -> Tensor v2 Int32 -> Tensor Build t -- | Creates a variable initialized to the given value. Initialization -- happens next time session runs. -initializedVariable :: TensorType a => Tensor Value a -> Build (Tensor Ref a) +initializedVariable :: (MonadBuild m, TensorType a) => Tensor v a -> m (Tensor Ref a) +initializedVariable' :: (MonadBuild m, TensorType a) => OpParams -> Tensor v a -> m (Tensor Ref a) -- | Creates a zero-initialized variable with the given shape. -zeroInitializedVariable :: (TensorType a, Num a) => Shape -> Build (Tensor Ref a) +zeroInitializedVariable :: (MonadBuild m, TensorType a, Num a) => Shape -> m (Tensor Ref a) +zeroInitializedVariable' :: (MonadBuild m, TensorType a, Num a) => OpParams -> Shape -> m (Tensor Ref a) -- | Creates a tensor filled with a scalar value. -- @@ -126,7 +139,54 @@ zeroInitializedVariable :: (TensorType a, Num a) => Shape -> Build (Tensor Ref a -- -- ```prettyprint # Output tensor has shape [2, 3]. fill([2, 3], 9) -- ==> [[9, 9, 9] [9, 9, 9]] ``` -fill :: TensorType t => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t +fill :: TensorType t => Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t +fill' :: TensorType t => OpParams -> Tensor v'1 Int32 -> Tensor v'2 t -> Tensor Build t + +-- | Return a tensor with the same shape and contents as the input tensor +-- or value. +identity :: TensorType t => Tensor v'1 t -> Tensor Build t +identity' :: TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t + +-- | Multiply the matrix "a" by the matrix "b". +-- +-- The inputs must be two-dimensional matrices and the inner dimension of +-- "a" (after being transposed if transpose_a is true) must match the +-- outer dimension of "b" (after being transposed if transposed_b is +-- true). +-- +--
      +--
    • Note*: The default kernel implementation for MatMul on GPUs uses +-- cublas.
    • +--
    +matMul :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +matMul' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +matTranspose :: TensorType a => Tensor e a -> Tensor Build a +matTranspose' :: TensorType a => OpParams -> Tensor v a -> Tensor Build a + +-- | Computes the mean of elements across dimensions of a tensor. +-- +-- Reduces input along the dimensions given in +-- reduction_indices. Unless keep_dims is true, the +-- rank of the tensor is reduced by 1 for each entry in +-- reduction_indices. If keep_dims is true, the reduced +-- dimensions are retained with length 1. +mean :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +mean' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t + +-- | Returns x * y element-wise. +-- +--
      +--
    • NOTE*: Mul supports broadcasting. More about broadcasting +-- here
    • +--
    +mul :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +mul' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t + +-- | Computes numerical negative value element-wise. +-- +-- I.e., \(y = -x\). +neg :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor Build t +neg' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns a one-hot tensor. -- @@ -181,43 +241,8 @@ fill :: TensorType t => Tensor v1 Int32 -> Tensor v2 t -> Tensor Value t -- ```output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // -- one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // -- one_hot(-1) ]``` -oneHot :: (TensorType t, TensorType tI, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) => Tensor v1 tI -> Tensor v2 Int32 -> Tensor v3 t -> Tensor v4 t -> Tensor Value t - --- | Multiply the matrix "a" by the matrix "b". --- --- The inputs must be two-dimensional matrices and the inner dimension of --- "a" (after being transposed if transpose_a is true) must match the --- outer dimension of "b" (after being transposed if transposed_b is --- true). --- ---
      ---
    • Note*: The default kernel implementation for MatMul on GPUs uses --- cublas.
    • ---
    -matMul :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Word16 ((:) * Double ((:) * Float ([] *))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t -matTranspose :: TensorType a => Tensor v a -> Tensor Value a - --- | Computes the mean of elements across dimensions of a tensor. --- --- Reduces input along the dimensions given in --- reduction_indices. Unless keep_dims is true, the --- rank of the tensor is reduced by 1 for each entry in --- reduction_indices. If keep_dims is true, the reduced --- dimensions are retained with length 1. -mean :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t - --- | Returns x * y element-wise. --- ---
      ---
    • NOTE*: Mul supports broadcasting. More about broadcasting --- here
    • ---
    -mul :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t - --- | Computes numerical negative value element-wise. --- --- I.e., \(y = -x\). -neg :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) => Tensor v1 t -> Tensor Value t +oneHot :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) => Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t +oneHot' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ((:) * Word8 ([] *)))) tI) => OpParams -> Tensor v'1 tI -> Tensor v'2 Int32 -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -- | Packs a list of N rank-R tensors into one -- rank-`(R+1)` tensor. @@ -239,8 +264,10 @@ neg :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) -- 6]] ``` -- -- This is the opposite of unpack. -pack :: TensorType t => [Tensor v1 t] -> Tensor Value t -placeholder :: TensorType a => Shape -> Build (Tensor Value a) +pack :: TensorType t => [Tensor v'1 t] -> Tensor Build t +pack' :: TensorType t => OpParams -> [Tensor v'1 t] -> Tensor Build t +placeholder :: (MonadBuild m, TensorType a) => Shape -> m (Tensor Value a) +placeholder' :: (MonadBuild m, TensorType a) => OpParams -> Shape -> m (Tensor Value a) -- | Creates a sequence of numbers. -- @@ -252,17 +279,20 @@ placeholder :: TensorType a => Shape -> Build (Tensor Value a) -- -- ``` # start is 3 # limit is 18 # delta is 3 -- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ``` -range :: (TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx) => Tensor v1 tidx -> Tensor v2 tidx -> Tensor v3 tidx -> Tensor Value tidx +range :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx => Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx +range' :: OneOf ((:) * Int32 ((:) * Int64 ((:) * Double ((:) * Float ([] *))))) tidx => OpParams -> Tensor v'1 tidx -> Tensor v'2 tidx -> Tensor v'3 tidx -> Tensor Build tidx -- | Helper function for reduction ops (translation of -- math_ops.reduced_shape). -reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Value Int32 +reducedShape :: (OneOf '[Int32, Int64] t1, OneOf '[Int32, Int64] t2) => Tensor v1 t1 -> Tensor v2 t2 -> Tensor Build Int32 -- | Computes rectified linear: `max(features, 0)`. -relu :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t) => Tensor v1 t -> Tensor Value t +relu :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => Tensor v'1 t -> Tensor Build t +relu' :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes rectified linear gradients for a Relu operation. 
-reluGrad :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +reluGrad :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +reluGrad' :: OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Reshapes a tensor. -- @@ -305,21 +335,24 @@ reluGrad :: (TensorType t, OneOf ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * I -- -- # tensor t is [7] # shape `[]` reshapes to a scalar -- reshape(t, []) ==> 7 ``` -reshape :: (TensorType t, TensorType tshape, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => Tensor v1 t -> Tensor v2 tshape -> Tensor Value t +reshape :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t +reshape' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tshape) => OpParams -> Tensor v'1 t -> Tensor v'2 tshape -> Tensor Build t -- | Restore a tensor's value from a checkpoint file. -restore :: TensorType a => ByteString -> Tensor Ref a -> Build ControlNode +restore :: (MonadBuild m, TensorType a) => ByteString -> Tensor Ref a -> m ControlNode -- | Restore a tensor's value from a checkpoint file. -- -- This version allows restoring from a checkpoint file that uses a -- different tensor name than the variable. -restoreFromName :: TensorType a => ByteString -> ByteString -> Tensor Ref a -> Build ControlNode -save :: TensorType a => ByteString -> [Tensor v a] -> Build ControlNode +restoreFromName :: (MonadBuild m, TensorType a) => ByteString -> ByteString -> Tensor Ref a -> m ControlNode +save :: (Rendered v, MonadBuild m, TensorType a) => ByteString -> [Tensor v a] -> m ControlNode -- | Create a constant scalar. -scalar :: TensorType a => a -> Tensor Value a -shape :: (TensorType t) => Tensor v1 t -> Tensor Value Int32 +scalar :: TensorType a => a -> Tensor Build a +scalar' :: TensorType a => OpParams -> a -> Tensor Build a +shape :: TensorType t => Tensor v t -> Tensor Build Int32 +shape' :: TensorType t => OpParams -> Tensor v t -> Tensor Build Int32 -- | Returns an element-wise indication of the sign of a number. -- @@ -327,7 +360,8 @@ shape :: (TensorType t) => Tensor v1 t -> Tensor Value Int32 -- -- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y -- = 0`. -sign :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) => Tensor v1 t -> Tensor Value t +sign :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor Build t +sign' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor Build t -- | Returns the size of a tensor. 
-- @@ -338,19 +372,22 @@ sign :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) -- -- ```prettyprint # t is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], -- [4, 4, 4]]]] size(t) ==> 12 ``` -size :: (TensorType t, TensorType out_type, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => Tensor v1 t -> Tensor Value out_type +size :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => Tensor v'1 t -> Tensor Build out_type +size' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) out_type) => OpParams -> Tensor v'1 t -> Tensor Build out_type -- | Computes softmax activations. -- -- For each batch i and class j we have -- -- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) -softmax :: (TensorType t, OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t) => Tensor v1 t -> Tensor Value t +softmax :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => Tensor v'1 t -> Tensor Build t +softmax' :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => OpParams -> Tensor v'1 t -> Tensor Build t -- | Computes softmax cross entropy cost and gradients to backpropagate. -- -- Inputs are the logits, not probabilities. -softmaxCrossEntropyWithLogits :: (TensorType t, OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t) => Tensor v1 t -> Tensor v2 t -> (Tensor Value t, Tensor Value t) +softmaxCrossEntropyWithLogits :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) +softmaxCrossEntropyWithLogits' :: OneOf ((:) * Word16 ((:) * Double ((:) * Float ([] *)))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> (Tensor Build t, Tensor Build t) -- | Converts a sparse representation into a dense tensor. -- @@ -374,7 +411,8 @@ softmaxCrossEntropyWithLogits :: (TensorType t, OneOf ((:) * Word16 ((:) * Doubl -- Indices should be sorted in lexicographic order, and indices must not -- contain any repeats. If validate_indices is true, these -- properties are checked during execution. -sparseToDense :: (TensorType t, TensorType tindices, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => Tensor v1 tindices -> Tensor v2 tindices -> Tensor v3 t -> Tensor v4 t -> Tensor Value t +sparseToDense :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t +sparseToDense' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tindices) => OpParams -> Tensor v'1 tindices -> Tensor v'2 tindices -> Tensor v'3 t -> Tensor v'4 t -> Tensor Build t -- | Returns x - y element-wise. -- @@ -382,7 +420,8 @@ sparseToDense :: (TensorType t, TensorType tindices, OneOf ((:) * Int32 ((:) * I --
  • NOTE*: Sub supports broadcasting. More about broadcasting -- here
  • -- -sub :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t) => Tensor v1 t -> Tensor v2 t -> Tensor Value t +sub :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => Tensor v'1 t -> Tensor v'2 t -> Tensor Build t +sub' :: OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int32 ((:) * Int64 ((:) * Word16 ((:) * Double ((:) * Float ([] *)))))))) t => OpParams -> Tensor v'1 t -> Tensor v'2 t -> Tensor Build t -- | Computes the sum of elements across dimensions of a tensor. -- @@ -391,35 +430,41 @@ sub :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) -- rank of the tensor is reduced by 1 for each entry in -- reduction_indices. If keep_dims is true, the reduced -- dimensions are retained with length 1. -sum :: (TensorType t, OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, TensorType tidx, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v1 t -> Tensor v2 tidx -> Tensor Value t +sum :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t +sum' :: (OneOf ((:) * (Complex Double) ((:) * (Complex Float) ((:) * Int16 ((:) * Int32 ((:) * Int64 ((:) * Int8 ((:) * Word16 ((:) * Word8 ((:) * Double ((:) * Float ([] *))))))))))) t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tidx) => OpParams -> Tensor v'1 t -> Tensor v'2 tidx -> Tensor Build t -- | Shuffle dimensions of x according to a permutation. -- -- The output y has the same rank as x. The shapes of -- x and y satisfy: `y.shape[i] == x.shape[perm[i]] for -- i in [0, 1, ..., rank(x) - 1]` -transpose :: (TensorType t, TensorType tperm, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => Tensor v1 t -> Tensor v2 tperm -> Tensor Value t -truncatedNormal :: TensorType a => Tensor v Int64 -> Build (Tensor Value a) +transpose :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t +transpose' :: (TensorType t, OneOf ((:) * Int32 ((:) * Int64 ([] *))) tperm) => OpParams -> Tensor v'1 t -> Tensor v'2 tperm -> Tensor Build t --- | Holds state in the form of a tensor that persists across steps. +-- | Random tensor from the unit normal distribution with bounded values. -- --- Outputs a ref to the tensor state so it may be read or modified. --- TODO(zhifengc/mrry): Adds a pointer to a more detail document about --- sharing states in tensorflow. -variable :: TensorType dtype => Shape -> Build (Tensor Ref dtype) +-- This is a type-restricted version of truncatedNormal. +truncatedNormal :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => Tensor v Int64 -> m (Tensor Value a) +truncatedNormal' :: (MonadBuild m, OneOf '[Word16, Double, Float] a) => OpParams -> Tensor v Int64 -> m (Tensor Value a) + +-- | Use VariableV2 instead. +variable :: (MonadBuild m', TensorType dtype) => Shape -> m' (Tensor Ref dtype) +variable' :: (MonadBuild m', TensorType dtype) => OpParams -> Shape -> m' (Tensor Ref dtype) -- | Create a constant vector. 
-vector :: TensorType a => [a] -> Tensor Value a -zeros :: (Num a, TensorType a) => Shape -> Tensor Value a +vector :: TensorType a => [a] -> Tensor Build a +vector' :: TensorType a => OpParams -> [a] -> Tensor Build a +zeros :: (Num a, TensorType a) => Shape -> Tensor Build a -- | Returns a tensor of zeros with the same shape and type as x. -zerosLike :: TensorType t => Tensor v1 t -> Tensor Value t +zerosLike :: TensorType t => Tensor v'1 t -> Tensor Build t +zerosLike' :: TensorType t => OpParams -> Tensor v'1 t -> Tensor Build t -- | Reshape a N-D tensor down to a scalar. -- -- See reshape. -scalarize :: (TensorType a) => Tensor v a -> Tensor Value a -instance (TensorFlow.Types.TensorType a, GHC.Num.Num a, v ~ TensorFlow.Tensor.Value, TensorFlow.Types.OneOf '[GHC.Types.Double, GHC.Types.Float, GHC.Int.Int32, GHC.Int.Int64, Data.Complex.Complex GHC.Types.Float, Data.Complex.Complex GHC.Types.Double] a) => GHC.Num.Num (TensorFlow.Tensor.Tensor v a) +scalarize :: TensorType a => Tensor v a -> Tensor Build a +instance (TensorFlow.Types.TensorType a, GHC.Num.Num a, v ~ TensorFlow.Build.Build, TensorFlow.Types.OneOf '[GHC.Types.Double, GHC.Types.Float, GHC.Int.Int32, GHC.Int.Int64, Data.Complex.Complex GHC.Types.Float, Data.Complex.Complex GHC.Types.Double] a) => GHC.Num.Num (TensorFlow.Tensor.Tensor v a) -- | Parallel lookups on the list of tensors. @@ -438,9 +483,9 @@ module TensorFlow.EmbeddingOps -- -- The results of the lookup are concatenated into a dense tensor. The -- returned tensor has shape `shape(ids) + shape(params)[1:]`. -embeddingLookup :: (TensorType a, OneOf '[Int64, Int32] b, Num b) => [Tensor v a] -> Tensor Value b -> Build (Tensor Value a) +embeddingLookup :: (MonadBuild m, Rendered v1, TensorType a, OneOf '[Int64, Int32] b, Num b) => [Tensor v1 a] -> Tensor v2 b -> m (Tensor Value a) module TensorFlow.Gradient -- | Gradient of y w.r.t. each element of xs. -gradients :: (Num (Tensor v1 a), v1 ~ Value, GradientCompatible a) => Tensor v1 a -> [Tensor v2 a] -> Build [Tensor Value a] +gradients :: (MonadBuild m, Rendered v2, GradientCompatible a) => Tensor v1 a -> [Tensor v2 a] -> m [Tensor Value a] diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html index b53fcd0..d61dd5f 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-AttrValue.html @@ -1,4 +1,4 @@ Proto.Tensorflow.Core.Framework.AttrValue

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe HaskellNone
    LanguageHaskell2010

    Proto.Tensorflow.Core.Framework.AttrValue

    Documentation

    data AttrValue Source

    Instances

    Eq AttrValue Source 
    Show AttrValue Source 
    Default AttrValue Source 
    Message AttrValue Source 
    HasField "b" AttrValue AttrValue Source 
    HasField "f" AttrValue AttrValue Source 
    HasField "func" AttrValue AttrValue Source 
    HasField "i" AttrValue AttrValue Source 
    HasField "list" AttrValue AttrValue Source 
    HasField "maybe'b" AttrValue AttrValue Source 
    HasField "maybe'f" AttrValue AttrValue Source 
    HasField "maybe'func" AttrValue AttrValue Source 
    HasField "maybe'i" AttrValue AttrValue Source 
    HasField "maybe'list" AttrValue AttrValue Source 
    HasField "maybe'placeholder" AttrValue AttrValue Source 
    HasField "maybe's" AttrValue AttrValue Source 
    HasField "maybe'shape" AttrValue AttrValue Source 
    HasField "maybe'tensor" AttrValue AttrValue Source 
    HasField "maybe'type'" AttrValue AttrValue Source 
    HasField "placeholder" AttrValue AttrValue Source 
    HasField "s" AttrValue AttrValue Source 
    HasField "shape" AttrValue AttrValue Source 
    HasField "tensor" AttrValue AttrValue Source 
    HasField "type'" AttrValue AttrValue Source 
    type Field "b" AttrValue = Bool Source 
    type Field "f" AttrValue = Float Source 
    type Field "func" AttrValue = NameAttrList Source 
    type Field "i" AttrValue = Int64 Source 
    type Field "list" AttrValue = AttrValue'ListValue Source 
    type Field "maybe'b" AttrValue = Maybe Bool Source 
    type Field "maybe'f" AttrValue = Maybe Float Source 
    type Field "maybe'func" AttrValue = Maybe NameAttrList Source 
    type Field "maybe'i" AttrValue = Maybe Int64 Source 
    type Field "maybe'list" AttrValue = Maybe AttrValue'ListValue Source 
    type Field "maybe'placeholder" AttrValue = Maybe Text Source 
    type Field "maybe's" AttrValue = Maybe ByteString Source 
    type Field "maybe'shape" AttrValue = Maybe TensorShapeProto Source 
    type Field "maybe'tensor" AttrValue = Maybe TensorProto Source 
    type Field "maybe'type'" AttrValue = Maybe DataType Source 
    type Field "placeholder" AttrValue = Text Source 
    type Field "s" AttrValue = ByteString Source 
    type Field "shape" AttrValue = TensorShapeProto Source 
    type Field "tensor" AttrValue = TensorProto Source 
    type Field "type'" AttrValue = DataType Source 

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') Source

    b :: forall msg msg'. HasField "b" msg msg' => Lens msg msg' (Field "b" msg) (Field "b" msg') Source

    f :: forall msg msg'. HasField "f" msg msg' => Lens msg msg' (Field "f" msg) (Field "f" msg') Source

    func :: forall msg msg'. HasField "func" msg msg' => Lens msg msg' (Field "func" msg) (Field "func" msg') Source

    i :: forall msg msg'. HasField "i" msg msg' => Lens msg msg' (Field "i" msg) (Field "i" msg') Source

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') Source

    list :: forall msg msg'. HasField "list" msg msg' => Lens msg msg' (Field "list" msg) (Field "list" msg') Source

    maybe'b :: forall msg msg'. HasField "maybe'b" msg msg' => Lens msg msg' (Field "maybe'b" msg) (Field "maybe'b" msg') Source

    maybe'f :: forall msg msg'. HasField "maybe'f" msg msg' => Lens msg msg' (Field "maybe'f" msg) (Field "maybe'f" msg') Source

    maybe'func :: forall msg msg'. HasField "maybe'func" msg msg' => Lens msg msg' (Field "maybe'func" msg) (Field "maybe'func" msg') Source

    maybe'i :: forall msg msg'. HasField "maybe'i" msg msg' => Lens msg msg' (Field "maybe'i" msg) (Field "maybe'i" msg') Source

    maybe'list :: forall msg msg'. HasField "maybe'list" msg msg' => Lens msg msg' (Field "maybe'list" msg) (Field "maybe'list" msg') Source

    maybe'placeholder :: forall msg msg'. HasField "maybe'placeholder" msg msg' => Lens msg msg' (Field "maybe'placeholder" msg) (Field "maybe'placeholder" msg') Source

    maybe's :: forall msg msg'. HasField "maybe's" msg msg' => Lens msg msg' (Field "maybe's" msg) (Field "maybe's" msg') Source

    maybe'shape :: forall msg msg'. HasField "maybe'shape" msg msg' => Lens msg msg' (Field "maybe'shape" msg) (Field "maybe'shape" msg') Source

    maybe'tensor :: forall msg msg'. HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg') Source

    maybe'type' :: forall msg msg'. HasField "maybe'type'" msg msg' => Lens msg msg' (Field "maybe'type'" msg) (Field "maybe'type'" msg') Source

    maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    placeholder :: forall msg msg'. HasField "placeholder" msg msg' => Lens msg msg' (Field "placeholder" msg) (Field "placeholder" msg') Source

    s :: forall msg msg'. HasField "s" msg msg' => Lens msg msg' (Field "s" msg) (Field "s" msg') Source

    shape :: forall msg msg'. HasField "shape" msg msg' => Lens msg msg' (Field "shape" msg) (Field "shape" msg') Source

    tensor :: forall msg msg'. HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg') Source

    type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') Source

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') Source

    \ No newline at end of file +

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe HaskellNone
    LanguageHaskell2010

    Proto.Tensorflow.Core.Framework.AttrValue

    Documentation

    data AttrValue

    Instances

    Eq AttrValue 
    Show AttrValue 
    Message AttrValue 
    Default AttrValue 
    HasField "b" AttrValue AttrValue 
    HasField "f" AttrValue AttrValue 
    HasField "func" AttrValue AttrValue 
    HasField "i" AttrValue AttrValue 
    HasField "list" AttrValue AttrValue 
    HasField "maybe'b" AttrValue AttrValue 
    HasField "maybe'f" AttrValue AttrValue 
    HasField "maybe'func" AttrValue AttrValue 
    HasField "maybe'i" AttrValue AttrValue 
    HasField "maybe'list" AttrValue AttrValue 
    HasField "maybe'placeholder" AttrValue AttrValue 
    HasField "maybe's" AttrValue AttrValue 
    HasField "maybe'shape" AttrValue AttrValue 
    HasField "maybe'tensor" AttrValue AttrValue 
    HasField "maybe'type'" AttrValue AttrValue 
    HasField "placeholder" AttrValue AttrValue 
    HasField "s" AttrValue AttrValue 
    HasField "shape" AttrValue AttrValue 
    HasField "tensor" AttrValue AttrValue 
    HasField "type'" AttrValue AttrValue 
    type Field "b" AttrValue = Bool 
    type Field "f" AttrValue = Float 
    type Field "func" AttrValue = NameAttrList 
    type Field "i" AttrValue = Int64 
    type Field "list" AttrValue = AttrValue'ListValue 
    type Field "maybe'b" AttrValue = Maybe Bool 
    type Field "maybe'f" AttrValue = Maybe Float 
    type Field "maybe'func" AttrValue = Maybe NameAttrList 
    type Field "maybe'i" AttrValue = Maybe Int64 
    type Field "maybe'list" AttrValue = Maybe AttrValue'ListValue 
    type Field "maybe'placeholder" AttrValue = Maybe Text 
    type Field "maybe's" AttrValue = Maybe ByteString 
    type Field "maybe'shape" AttrValue = Maybe TensorShapeProto 
    type Field "maybe'tensor" AttrValue = Maybe TensorProto 
    type Field "maybe'type'" AttrValue = Maybe DataType 
    type Field "placeholder" AttrValue = Text 
    type Field "s" AttrValue = ByteString 
    type Field "shape" AttrValue = TensorShapeProto 
    type Field "tensor" AttrValue = TensorProto 
    type Field "type'" AttrValue = DataType 

    data NameAttrList

    Constructors

    NameAttrList 

    Fields

    _NameAttrList'name :: !Text
     
    _NameAttrList'attr :: !(Map Text AttrValue)
     

    Instances

    Eq NameAttrList 
    Show NameAttrList 
    Message NameAttrList 
    Default NameAttrList 
    HasField "attr" NameAttrList NameAttrList 
    HasField "name" NameAttrList NameAttrList 
    type Field "attr" NameAttrList = Map Text AttrValue 
    type Field "name" NameAttrList = Text 

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg')

    b :: forall msg msg'. HasField "b" msg msg' => Lens msg msg' (Field "b" msg) (Field "b" msg')

    f :: forall msg msg'. HasField "f" msg msg' => Lens msg msg' (Field "f" msg) (Field "f" msg')

    func :: forall msg msg'. HasField "func" msg msg' => Lens msg msg' (Field "func" msg) (Field "func" msg')

    i :: forall msg msg'. HasField "i" msg msg' => Lens msg msg' (Field "i" msg) (Field "i" msg')

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg')

    list :: forall msg msg'. HasField "list" msg msg' => Lens msg msg' (Field "list" msg) (Field "list" msg')

    maybe'b :: forall msg msg'. HasField "maybe'b" msg msg' => Lens msg msg' (Field "maybe'b" msg) (Field "maybe'b" msg')

    maybe'f :: forall msg msg'. HasField "maybe'f" msg msg' => Lens msg msg' (Field "maybe'f" msg) (Field "maybe'f" msg')

    maybe'func :: forall msg msg'. HasField "maybe'func" msg msg' => Lens msg msg' (Field "maybe'func" msg) (Field "maybe'func" msg')

    maybe'i :: forall msg msg'. HasField "maybe'i" msg msg' => Lens msg msg' (Field "maybe'i" msg) (Field "maybe'i" msg')

    maybe'list :: forall msg msg'. HasField "maybe'list" msg msg' => Lens msg msg' (Field "maybe'list" msg) (Field "maybe'list" msg')

    maybe'placeholder :: forall msg msg'. HasField "maybe'placeholder" msg msg' => Lens msg msg' (Field "maybe'placeholder" msg) (Field "maybe'placeholder" msg')

    maybe's :: forall msg msg'. HasField "maybe's" msg msg' => Lens msg msg' (Field "maybe's" msg) (Field "maybe's" msg')

    maybe'shape :: forall msg msg'. HasField "maybe'shape" msg msg' => Lens msg msg' (Field "maybe'shape" msg) (Field "maybe'shape" msg')

    maybe'tensor :: forall msg msg'. HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg')

    maybe'type' :: forall msg msg'. HasField "maybe'type'" msg msg' => Lens msg msg' (Field "maybe'type'" msg) (Field "maybe'type'" msg')

    maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg')

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

    placeholder :: forall msg msg'. HasField "placeholder" msg msg' => Lens msg msg' (Field "placeholder" msg) (Field "placeholder" msg')

    s :: forall msg msg'. HasField "s" msg msg' => Lens msg msg' (Field "s" msg) (Field "s" msg')

    shape :: forall msg msg'. HasField "shape" msg msg' => Lens msg msg' (Field "shape" msg) (Field "shape" msg')

    tensor :: forall msg msg'. HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg')

    type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg')

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html index 5fbe7e3..31b7974 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Graph.html @@ -1,4 +1,4 @@ Proto.Tensorflow.Core.Framework.Graph

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe HaskellNone
    LanguageHaskell2010

    Proto.Tensorflow.Core.Framework.Graph

    Documentation

    data GraphDef Source

    Constructors

    GraphDef 

    Fields

    _GraphDef'node :: [NodeDef]
     
    _GraphDef'versions :: Maybe VersionDef
     
    _GraphDef'version :: Int32
     
    _GraphDef'library :: Maybe FunctionDefLibrary
     

    Instances

    library :: forall msg msg'. HasField "library" msg msg' => Lens msg msg' (Field "library" msg) (Field "library" msg') Source

    maybe'library :: forall msg msg'. HasField "maybe'library" msg msg' => Lens msg msg' (Field "maybe'library" msg) (Field "maybe'library" msg') Source

    maybe'versions :: forall msg msg'. HasField "maybe'versions" msg msg' => Lens msg msg' (Field "maybe'versions" msg) (Field "maybe'versions" msg') Source

    node :: forall msg msg'. HasField "node" msg msg' => Lens msg msg' (Field "node" msg) (Field "node" msg') Source

    version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') Source

    versions :: forall msg msg'. HasField "versions" msg msg' => Lens msg msg' (Field "versions" msg) (Field "versions" msg') Source

    \ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.Graph

    Documentation

    data GraphDef

    Constructors

    GraphDef 

    Fields

    _GraphDef'node :: ![NodeDef]
     
    _GraphDef'versions :: !(Maybe VersionDef)
     
    _GraphDef'version :: !Int32
     
    _GraphDef'library :: !(Maybe FunctionDefLibrary)
     

    Instances

    Eq GraphDef 
    Show GraphDef 
    Message GraphDef 
    Default GraphDef 
    HasField "library" GraphDef GraphDef 
    HasField "maybe'library" GraphDef GraphDef 
    HasField "maybe'versions" GraphDef GraphDef 
    HasField "node" GraphDef GraphDef 
    HasField "version" GraphDef GraphDef 
    HasField "versions" GraphDef GraphDef 
    type Field "library" GraphDef = FunctionDefLibrary 
    type Field "maybe'library" GraphDef = Maybe FunctionDefLibrary 
    type Field "maybe'versions" GraphDef = Maybe VersionDef 
    type Field "node" GraphDef = [NodeDef] 
    type Field "version" GraphDef = Int32 
    type Field "versions" GraphDef = VersionDef 

    library :: forall msg msg'. HasField "library" msg msg' => Lens msg msg' (Field "library" msg) (Field "library" msg')

    maybe'library :: forall msg msg'. HasField "maybe'library" msg msg' => Lens msg msg' (Field "maybe'library" msg) (Field "maybe'library" msg')

    maybe'versions :: forall msg msg'. HasField "maybe'versions" msg msg' => Lens msg msg' (Field "maybe'versions" msg) (Field "maybe'versions" msg')

    node :: forall msg msg'. HasField "node" msg msg' => Lens msg msg' (Field "node" msg) (Field "node" msg')

    version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg')

    versions :: forall msg msg'. HasField "versions" msg msg' => Lens msg msg' (Field "versions" msg) (Field "versions" msg')
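
    A sketch of assembling a GraphDef through these lenses (imports as in the AttrValue example; mkGraph and nodeCount are illustrative helper names, not part of this module):

        import Data.Default (def)
        import Lens.Family2 ((&), (.~), (^.))
        import Proto.Tensorflow.Core.Framework.Graph (GraphDef, node)
        import Proto.Tensorflow.Core.Framework.NodeDef (NodeDef)

        -- Wrap a list of NodeDefs in a GraphDef; the optional versions and
        -- library submessages stay unset, so their maybe'* lenses read Nothing.
        mkGraph :: [NodeDef] -> GraphDef
        mkGraph ns = def & node .~ ns

        nodeCount :: GraphDef -> Int
        nodeCount g = length (g ^. node)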

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html
index 96a4b53..8f51107 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-NodeDef.html
@@ -1,4 +1,4 @@
    Proto.Tensorflow.Core.Framework.NodeDef

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.NodeDef

    Documentation

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') Source

    device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') Source

    input :: forall msg msg'. HasField "input" msg msg' => Lens msg msg' (Field "input" msg) (Field "input" msg') Source

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') Source

    maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg') Source

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') Source

    \ No newline at end of file +

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.NodeDef

    Documentation

    data NodeDef

    Constructors

    NodeDef 

    Fields

    _NodeDef'name :: !Text
     
    _NodeDef'op :: !Text
     
    _NodeDef'input :: ![Text]
     
    _NodeDef'device :: !Text
     
    _NodeDef'attr :: !(Map Text AttrValue)
     

    Instances

    Eq NodeDef 
    Show NodeDef 
    Message NodeDef 
    Default NodeDef 
    HasField "attr" NodeDef NodeDef 
    HasField "device" NodeDef NodeDef 
    HasField "input" NodeDef NodeDef 
    HasField "name" NodeDef NodeDef 
    HasField "op" NodeDef NodeDef 
    type Field "attr" NodeDef = Map Text AttrValue 
    type Field "device" NodeDef = Text 
    type Field "input" NodeDef = [Text] 
    type Field "name" NodeDef = Text 
    type Field "op" NodeDef = Text 

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg')

    device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg')

    input :: forall msg msg'. HasField "input" msg msg' => Lens msg msg' (Field "input" msg) (Field "input" msg')

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg')

    maybe'value :: forall msg msg'. HasField "maybe'value" msg msg' => Lens msg msg' (Field "maybe'value" msg) (Field "maybe'value" msg')

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

    op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg')

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')
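
    A sketch of building a NodeDef with the lenses above (OverloadedStrings for the Text fields; def and the lens operators as in the earlier examples; the node names "x", "y", and "sum" are made up):

        {-# LANGUAGE OverloadedStrings #-}

        import Data.Default (def)
        import Lens.Family2 ((&), (.~))
        import Proto.Tensorflow.Core.Framework.NodeDef (NodeDef, input, name, op)

        -- An Add node consuming the outputs of two hypothetical nodes "x"
        -- and "y"; device and the attr map keep their defaults.
        addNode :: NodeDef
        addNode = def
            & name  .~ "sum"
            & op    .~ "Add"
            & input .~ ["x", "y"]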

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html
index c621aae..8dce10d 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-OpDef.html
@@ -1,4 +1,4 @@
    Proto.Tensorflow.Core.Framework.OpDef

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.OpDef

    Documentation

    data OpDef Source

    Instances

    Eq OpDef Source 
    Show OpDef Source 
    Default OpDef Source 
    Message OpDef Source 
    HasField "allowsUninitializedInput" OpDef OpDef Source 
    HasField "attr" OpDef OpDef Source 
    HasField "deprecation" OpDef OpDef Source 
    HasField "description" OpDef OpDef Source 
    HasField "inputArg" OpDef OpDef Source 
    HasField "isAggregate" OpDef OpDef Source 
    HasField "isCommutative" OpDef OpDef Source 
    HasField "isStateful" OpDef OpDef Source 
    HasField "maybe'deprecation" OpDef OpDef Source 
    HasField "name" OpDef OpDef Source 
    HasField "outputArg" OpDef OpDef Source 
    HasField "summary" OpDef OpDef Source 
    type Field "allowsUninitializedInput" OpDef = Bool Source 
    type Field "attr" OpDef = [OpDef'AttrDef] Source 
    type Field "deprecation" OpDef = OpDeprecation Source 
    type Field "description" OpDef = Text Source 
    type Field "inputArg" OpDef = [OpDef'ArgDef] Source 
    type Field "isAggregate" OpDef = Bool Source 
    type Field "isCommutative" OpDef = Bool Source 
    type Field "isStateful" OpDef = Bool Source 
    type Field "maybe'deprecation" OpDef = Maybe OpDeprecation Source 
    type Field "name" OpDef = Text Source 
    type Field "outputArg" OpDef = [OpDef'ArgDef] Source 
    type Field "summary" OpDef = Text Source 

    allowedValues :: forall msg msg'. HasField "allowedValues" msg msg' => Lens msg msg' (Field "allowedValues" msg) (Field "allowedValues" msg') Source

    allowsUninitializedInput :: forall msg msg'. HasField "allowsUninitializedInput" msg msg' => Lens msg msg' (Field "allowsUninitializedInput" msg) (Field "allowsUninitializedInput" msg') Source

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') Source

    defaultValue :: forall msg msg'. HasField "defaultValue" msg msg' => Lens msg msg' (Field "defaultValue" msg) (Field "defaultValue" msg') Source

    deprecation :: forall msg msg'. HasField "deprecation" msg msg' => Lens msg msg' (Field "deprecation" msg) (Field "deprecation" msg') Source

    description :: forall msg msg'. HasField "description" msg msg' => Lens msg msg' (Field "description" msg) (Field "description" msg') Source

    explanation :: forall msg msg'. HasField "explanation" msg msg' => Lens msg msg' (Field "explanation" msg) (Field "explanation" msg') Source

    hasMinimum :: forall msg msg'. HasField "hasMinimum" msg msg' => Lens msg msg' (Field "hasMinimum" msg) (Field "hasMinimum" msg') Source

    inputArg :: forall msg msg'. HasField "inputArg" msg msg' => Lens msg msg' (Field "inputArg" msg) (Field "inputArg" msg') Source

    isAggregate :: forall msg msg'. HasField "isAggregate" msg msg' => Lens msg msg' (Field "isAggregate" msg) (Field "isAggregate" msg') Source

    isCommutative :: forall msg msg'. HasField "isCommutative" msg msg' => Lens msg msg' (Field "isCommutative" msg) (Field "isCommutative" msg') Source

    isRef :: forall msg msg'. HasField "isRef" msg msg' => Lens msg msg' (Field "isRef" msg) (Field "isRef" msg') Source

    isStateful :: forall msg msg'. HasField "isStateful" msg msg' => Lens msg msg' (Field "isStateful" msg) (Field "isStateful" msg') Source

    maybe'allowedValues :: forall msg msg'. HasField "maybe'allowedValues" msg msg' => Lens msg msg' (Field "maybe'allowedValues" msg) (Field "maybe'allowedValues" msg') Source

    maybe'defaultValue :: forall msg msg'. HasField "maybe'defaultValue" msg msg' => Lens msg msg' (Field "maybe'defaultValue" msg) (Field "maybe'defaultValue" msg') Source

    maybe'deprecation :: forall msg msg'. HasField "maybe'deprecation" msg msg' => Lens msg msg' (Field "maybe'deprecation" msg) (Field "maybe'deprecation" msg') Source

    minimum :: forall msg msg'. HasField "minimum" msg msg' => Lens msg msg' (Field "minimum" msg) (Field "minimum" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    numberAttr :: forall msg msg'. HasField "numberAttr" msg msg' => Lens msg msg' (Field "numberAttr" msg) (Field "numberAttr" msg') Source

    op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg') Source

    outputArg :: forall msg msg'. HasField "outputArg" msg msg' => Lens msg msg' (Field "outputArg" msg) (Field "outputArg" msg') Source

    summary :: forall msg msg'. HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg') Source

    type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg') Source

    typeAttr :: forall msg msg'. HasField "typeAttr" msg msg' => Lens msg msg' (Field "typeAttr" msg) (Field "typeAttr" msg') Source

    typeListAttr :: forall msg msg'. HasField "typeListAttr" msg msg' => Lens msg msg' (Field "typeListAttr" msg) (Field "typeListAttr" msg') Source

    version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg') Source

    \ No newline at end of file +

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.OpDef

    Documentation

    data OpDef

    Instances

    Eq OpDef 
    Show OpDef 
    Message OpDef 
    Default OpDef 
    HasField "allowsUninitializedInput" OpDef OpDef 
    HasField "attr" OpDef OpDef 
    HasField "deprecation" OpDef OpDef 
    HasField "description" OpDef OpDef 
    HasField "inputArg" OpDef OpDef 
    HasField "isAggregate" OpDef OpDef 
    HasField "isCommutative" OpDef OpDef 
    HasField "isStateful" OpDef OpDef 
    HasField "maybe'deprecation" OpDef OpDef 
    HasField "name" OpDef OpDef 
    HasField "outputArg" OpDef OpDef 
    HasField "summary" OpDef OpDef 
    type Field "allowsUninitializedInput" OpDef = Bool 
    type Field "attr" OpDef = [OpDef'AttrDef] 
    type Field "deprecation" OpDef = OpDeprecation 
    type Field "description" OpDef = Text 
    type Field "inputArg" OpDef = [OpDef'ArgDef] 
    type Field "isAggregate" OpDef = Bool 
    type Field "isCommutative" OpDef = Bool 
    type Field "isStateful" OpDef = Bool 
    type Field "maybe'deprecation" OpDef = Maybe OpDeprecation 
    type Field "name" OpDef = Text 
    type Field "outputArg" OpDef = [OpDef'ArgDef] 
    type Field "summary" OpDef = Text 

    data OpDef'ArgDef

    Instances

    Eq OpDef'ArgDef 
    Show OpDef'ArgDef 
    Message OpDef'ArgDef 
    Default OpDef'ArgDef 
    HasField "description" OpDef'ArgDef OpDef'ArgDef 
    HasField "isRef" OpDef'ArgDef OpDef'ArgDef 
    HasField "name" OpDef'ArgDef OpDef'ArgDef 
    HasField "numberAttr" OpDef'ArgDef OpDef'ArgDef 
    HasField "type'" OpDef'ArgDef OpDef'ArgDef 
    HasField "typeAttr" OpDef'ArgDef OpDef'ArgDef 
    HasField "typeListAttr" OpDef'ArgDef OpDef'ArgDef 
    type Field "description" OpDef'ArgDef = Text 
    type Field "isRef" OpDef'ArgDef = Bool 
    type Field "name" OpDef'ArgDef = Text 
    type Field "numberAttr" OpDef'ArgDef = Text 
    type Field "type'" OpDef'ArgDef = DataType 
    type Field "typeAttr" OpDef'ArgDef = Text 
    type Field "typeListAttr" OpDef'ArgDef = Text 

    data OpDef'AttrDef

    Instances

    Eq OpDef'AttrDef 
    Show OpDef'AttrDef 
    Message OpDef'AttrDef 
    Default OpDef'AttrDef 
    HasField "allowedValues" OpDef'AttrDef OpDef'AttrDef 
    HasField "defaultValue" OpDef'AttrDef OpDef'AttrDef 
    HasField "description" OpDef'AttrDef OpDef'AttrDef 
    HasField "hasMinimum" OpDef'AttrDef OpDef'AttrDef 
    HasField "maybe'allowedValues" OpDef'AttrDef OpDef'AttrDef 
    HasField "maybe'defaultValue" OpDef'AttrDef OpDef'AttrDef 
    HasField "minimum" OpDef'AttrDef OpDef'AttrDef 
    HasField "name" OpDef'AttrDef OpDef'AttrDef 
    HasField "type'" OpDef'AttrDef OpDef'AttrDef 
    type Field "allowedValues" OpDef'AttrDef = AttrValue 
    type Field "defaultValue" OpDef'AttrDef = AttrValue 
    type Field "description" OpDef'AttrDef = Text 
    type Field "hasMinimum" OpDef'AttrDef = Bool 
    type Field "maybe'allowedValues" OpDef'AttrDef = Maybe AttrValue 
    type Field "maybe'defaultValue" OpDef'AttrDef = Maybe AttrValue 
    type Field "minimum" OpDef'AttrDef = Int64 
    type Field "name" OpDef'AttrDef = Text 
    type Field "type'" OpDef'AttrDef = Text 

    data OpDeprecation

    Instances

    Eq OpDeprecation 
    Show OpDeprecation 
    Message OpDeprecation 
    Default OpDeprecation 
    HasField "explanation" OpDeprecation OpDeprecation 
    HasField "version" OpDeprecation OpDeprecation 
    type Field "explanation" OpDeprecation = Text 
    type Field "version" OpDeprecation = Int32 

    data OpList

    Constructors

    OpList 

    Fields

    _OpList'op :: ![OpDef]
     

    Instances

    Eq OpList 
    Show OpList 
    Message OpList 
    Default OpList 
    HasField "op" OpList OpList 
    type Field "op" OpList = [OpDef] 

    allowedValues :: forall msg msg'. HasField "allowedValues" msg msg' => Lens msg msg' (Field "allowedValues" msg) (Field "allowedValues" msg')

    allowsUninitializedInput :: forall msg msg'. HasField "allowsUninitializedInput" msg msg' => Lens msg msg' (Field "allowsUninitializedInput" msg) (Field "allowsUninitializedInput" msg')

    attr :: forall msg msg'. HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg')

    defaultValue :: forall msg msg'. HasField "defaultValue" msg msg' => Lens msg msg' (Field "defaultValue" msg) (Field "defaultValue" msg')

    deprecation :: forall msg msg'. HasField "deprecation" msg msg' => Lens msg msg' (Field "deprecation" msg) (Field "deprecation" msg')

    description :: forall msg msg'. HasField "description" msg msg' => Lens msg msg' (Field "description" msg) (Field "description" msg')

    explanation :: forall msg msg'. HasField "explanation" msg msg' => Lens msg msg' (Field "explanation" msg) (Field "explanation" msg')

    hasMinimum :: forall msg msg'. HasField "hasMinimum" msg msg' => Lens msg msg' (Field "hasMinimum" msg) (Field "hasMinimum" msg')

    inputArg :: forall msg msg'. HasField "inputArg" msg msg' => Lens msg msg' (Field "inputArg" msg) (Field "inputArg" msg')

    isAggregate :: forall msg msg'. HasField "isAggregate" msg msg' => Lens msg msg' (Field "isAggregate" msg) (Field "isAggregate" msg')

    isCommutative :: forall msg msg'. HasField "isCommutative" msg msg' => Lens msg msg' (Field "isCommutative" msg) (Field "isCommutative" msg')

    isRef :: forall msg msg'. HasField "isRef" msg msg' => Lens msg msg' (Field "isRef" msg) (Field "isRef" msg')

    isStateful :: forall msg msg'. HasField "isStateful" msg msg' => Lens msg msg' (Field "isStateful" msg) (Field "isStateful" msg')

    maybe'allowedValues :: forall msg msg'. HasField "maybe'allowedValues" msg msg' => Lens msg msg' (Field "maybe'allowedValues" msg) (Field "maybe'allowedValues" msg')

    maybe'defaultValue :: forall msg msg'. HasField "maybe'defaultValue" msg msg' => Lens msg msg' (Field "maybe'defaultValue" msg) (Field "maybe'defaultValue" msg')

    maybe'deprecation :: forall msg msg'. HasField "maybe'deprecation" msg msg' => Lens msg msg' (Field "maybe'deprecation" msg) (Field "maybe'deprecation" msg')

    minimum :: forall msg msg'. HasField "minimum" msg msg' => Lens msg msg' (Field "minimum" msg) (Field "minimum" msg')

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

    numberAttr :: forall msg msg'. HasField "numberAttr" msg msg' => Lens msg msg' (Field "numberAttr" msg) (Field "numberAttr" msg')

    op :: forall msg msg'. HasField "op" msg msg' => Lens msg msg' (Field "op" msg) (Field "op" msg')

    outputArg :: forall msg msg'. HasField "outputArg" msg msg' => Lens msg msg' (Field "outputArg" msg) (Field "outputArg" msg')

    summary :: forall msg msg'. HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg')

    type' :: forall msg msg'. HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field "type'" msg')

    typeAttr :: forall msg msg'. HasField "typeAttr" msg msg' => Lens msg msg' (Field "typeAttr" msg) (Field "typeAttr" msg')

    typeListAttr :: forall msg msg'. HasField "typeListAttr" msg msg' => Lens msg msg' (Field "typeListAttr" msg) (Field "typeListAttr" msg')

    version :: forall msg msg'. HasField "version" msg msg' => Lens msg msg' (Field "version" msg) (Field "version" msg')
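
    A read-side sketch: because name is a HasField lens on both OpDef and OpDef'ArgDef (see the instance lists above), the same lens walks both levels. (signature is a hypothetical helper; imports as in the earlier examples.)

        {-# LANGUAGE OverloadedStrings #-}

        import qualified Data.Text as Text
        import Lens.Family2 ((^.))
        import Proto.Tensorflow.Core.Framework.OpDef (OpDef, inputArg, name, outputArg)

        -- Render an op's signature from its OpDef, e.g. "Add(x, y) -> z".
        signature :: OpDef -> Text.Text
        signature o = Text.concat
            [ o ^. name
            , "(", Text.intercalate ", " [a ^. name | a <- o ^. inputArg], ")"
            , " -> "
            , Text.intercalate ", " [a ^. name | a <- o ^. outputArg]
            ]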

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html
index 319a9f1..ab58a14 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-ResourceHandle.html
@@ -1,4 +1,4 @@
    Proto.Tensorflow.Core.Framework.ResourceHandle

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.ResourceHandle

    Documentation

    container :: forall msg msg'. HasField "container" msg msg' => Lens msg msg' (Field "container" msg) (Field "container" msg') Source

    device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') Source

    hashCode :: forall msg msg'. HasField "hashCode" msg msg' => Lens msg msg' (Field "hashCode" msg) (Field "hashCode" msg') Source

    maybeTypeName :: forall msg msg'. HasField "maybeTypeName" msg msg' => Lens msg msg' (Field "maybeTypeName" msg) (Field "maybeTypeName" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    \ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.ResourceHandle

    Documentation

    data ResourceHandle

    Instances

    Eq ResourceHandle 
    Show ResourceHandle 
    Message ResourceHandle 
    Default ResourceHandle 
    HasField "container" ResourceHandle ResourceHandle 
    HasField "device" ResourceHandle ResourceHandle 
    HasField "hashCode" ResourceHandle ResourceHandle 
    HasField "maybeTypeName" ResourceHandle ResourceHandle 
    HasField "name" ResourceHandle ResourceHandle 
    type Field "container" ResourceHandle = Text 
    type Field "device" ResourceHandle = Text 
    type Field "hashCode" ResourceHandle = Word64 
    type Field "maybeTypeName" ResourceHandle = Text 
    type Field "name" ResourceHandle = Text 

    container :: forall msg msg'. HasField "container" msg msg' => Lens msg msg' (Field "container" msg) (Field "container" msg')

    device :: forall msg msg'. HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg')

    hashCode :: forall msg msg'. HasField "hashCode" msg msg' => Lens msg msg' (Field "hashCode" msg) (Field "hashCode" msg')

    maybeTypeName :: forall msg msg'. HasField "maybeTypeName" msg msg' => Lens msg msg' (Field "maybeTypeName" msg) (Field "maybeTypeName" msg')

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')
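
    A small read-side sketch (qualifiedName is a hypothetical helper; joining device, container, and name mirrors how TensorFlow scopes resources):

        import qualified Data.Text as Text
        import Lens.Family2 ((^.))
        import Proto.Tensorflow.Core.Framework.ResourceHandle (ResourceHandle, container, device, name)

        -- Join the scoping fields into one path-like identifier.
        qualifiedName :: ResourceHandle -> Text.Text
        qualifiedName h = Text.intercalate "/" [h ^. device, h ^. container, h ^. name]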

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Summary.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Summary.html
new file mode 100644
index 0000000..f5d1c6d
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Summary.html
@@ -0,0 +1,4 @@
+Proto.Tensorflow.Core.Framework.Summary

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.Summary

    Documentation

    data HistogramProto

    Instances

    Eq HistogramProto 
    Show HistogramProto 
    Message HistogramProto 
    Default HistogramProto 
    HasField "bucket" HistogramProto HistogramProto 
    HasField "bucketLimit" HistogramProto HistogramProto 
    HasField "max" HistogramProto HistogramProto 
    HasField "min" HistogramProto HistogramProto 
    HasField "num" HistogramProto HistogramProto 
    HasField "sum" HistogramProto HistogramProto 
    HasField "sumSquares" HistogramProto HistogramProto 
    type Field "bucket" HistogramProto = [Double] 
    type Field "bucketLimit" HistogramProto = [Double] 
    type Field "max" HistogramProto = Double 
    type Field "min" HistogramProto = Double 
    type Field "num" HistogramProto = Double 
    type Field "sum" HistogramProto = Double 
    type Field "sumSquares" HistogramProto = Double 

    data Summary

    Constructors

    Summary 

    Instances

    Eq Summary 
    Show Summary 
    Message Summary 
    Default Summary 
    HasField "value" Summary Summary 
    type Field "value" Summary = [Summary'Value] 

    data Summary'Audio

    Instances

    Eq Summary'Audio 
    Show Summary'Audio 
    Message Summary'Audio 
    Default Summary'Audio 
    HasField "contentType" Summary'Audio Summary'Audio 
    HasField "encodedAudioString" Summary'Audio Summary'Audio 
    HasField "lengthFrames" Summary'Audio Summary'Audio 
    HasField "numChannels" Summary'Audio Summary'Audio 
    HasField "sampleRate" Summary'Audio Summary'Audio 
    type Field "contentType" Summary'Audio = Text 
    type Field "encodedAudioString" Summary'Audio = ByteString 
    type Field "lengthFrames" Summary'Audio = Int64 
    type Field "numChannels" Summary'Audio = Int64 
    type Field "sampleRate" Summary'Audio = Float 

    data Summary'Image

    Instances

    Eq Summary'Image 
    Show Summary'Image 
    Message Summary'Image 
    Default Summary'Image 
    HasField "colorspace" Summary'Image Summary'Image 
    HasField "encodedImageString" Summary'Image Summary'Image 
    HasField "height" Summary'Image Summary'Image 
    HasField "width" Summary'Image Summary'Image 
    type Field "colorspace" Summary'Image = Int32 
    type Field "encodedImageString" Summary'Image = ByteString 
    type Field "height" Summary'Image = Int32 
    type Field "width" Summary'Image = Int32 

    data Summary'Value

    Instances

    Eq Summary'Value 
    Show Summary'Value 
    Message Summary'Value 
    Default Summary'Value 
    HasField "audio" Summary'Value Summary'Value 
    HasField "histo" Summary'Value Summary'Value 
    HasField "image" Summary'Value Summary'Value 
    HasField "maybe'audio" Summary'Value Summary'Value 
    HasField "maybe'histo" Summary'Value Summary'Value 
    HasField "maybe'image" Summary'Value Summary'Value 
    HasField "maybe'obsoleteOldStyleHistogram" Summary'Value Summary'Value 
    HasField "maybe'simpleValue" Summary'Value Summary'Value 
    HasField "maybe'tensor" Summary'Value Summary'Value 
    HasField "nodeName" Summary'Value Summary'Value 
    HasField "obsoleteOldStyleHistogram" Summary'Value Summary'Value 
    HasField "simpleValue" Summary'Value Summary'Value 
    HasField "tag" Summary'Value Summary'Value 
    HasField "tensor" Summary'Value Summary'Value 
    type Field "audio" Summary'Value = Summary'Audio 
    type Field "histo" Summary'Value = HistogramProto 
    type Field "image" Summary'Value = Summary'Image 
    type Field "maybe'audio" Summary'Value = Maybe Summary'Audio 
    type Field "maybe'histo" Summary'Value = Maybe HistogramProto 
    type Field "maybe'image" Summary'Value = Maybe Summary'Image 
    type Field "maybe'obsoleteOldStyleHistogram" Summary'Value = Maybe ByteString 
    type Field "maybe'simpleValue" Summary'Value = Maybe Float 
    type Field "maybe'tensor" Summary'Value = Maybe TensorProto 
    type Field "nodeName" Summary'Value = Text 
    type Field "obsoleteOldStyleHistogram" Summary'Value = ByteString 
    type Field "simpleValue" Summary'Value = Float 
    type Field "tag" Summary'Value = Text 
    type Field "tensor" Summary'Value = TensorProto 

    audio :: forall msg msg'. HasField "audio" msg msg' => Lens msg msg' (Field "audio" msg) (Field "audio" msg')

    bucket :: forall msg msg'. HasField "bucket" msg msg' => Lens msg msg' (Field "bucket" msg) (Field "bucket" msg')

    bucketLimit :: forall msg msg'. HasField "bucketLimit" msg msg' => Lens msg msg' (Field "bucketLimit" msg) (Field "bucketLimit" msg')

    colorspace :: forall msg msg'. HasField "colorspace" msg msg' => Lens msg msg' (Field "colorspace" msg) (Field "colorspace" msg')

    contentType :: forall msg msg'. HasField "contentType" msg msg' => Lens msg msg' (Field "contentType" msg) (Field "contentType" msg')

    encodedAudioString :: forall msg msg'. HasField "encodedAudioString" msg msg' => Lens msg msg' (Field "encodedAudioString" msg) (Field "encodedAudioString" msg')

    encodedImageString :: forall msg msg'. HasField "encodedImageString" msg msg' => Lens msg msg' (Field "encodedImageString" msg) (Field "encodedImageString" msg')

    height :: forall msg msg'. HasField "height" msg msg' => Lens msg msg' (Field "height" msg) (Field "height" msg')

    histo :: forall msg msg'. HasField "histo" msg msg' => Lens msg msg' (Field "histo" msg) (Field "histo" msg')

    image :: forall msg msg'. HasField "image" msg msg' => Lens msg msg' (Field "image" msg) (Field "image" msg')

    lengthFrames :: forall msg msg'. HasField "lengthFrames" msg msg' => Lens msg msg' (Field "lengthFrames" msg) (Field "lengthFrames" msg')

    max :: forall msg msg'. HasField "max" msg msg' => Lens msg msg' (Field "max" msg) (Field "max" msg')

    maybe'audio :: forall msg msg'. HasField "maybe'audio" msg msg' => Lens msg msg' (Field "maybe'audio" msg) (Field "maybe'audio" msg')

    maybe'histo :: forall msg msg'. HasField "maybe'histo" msg msg' => Lens msg msg' (Field "maybe'histo" msg) (Field "maybe'histo" msg')

    maybe'image :: forall msg msg'. HasField "maybe'image" msg msg' => Lens msg msg' (Field "maybe'image" msg) (Field "maybe'image" msg')

    maybe'obsoleteOldStyleHistogram :: forall msg msg'. HasField "maybe'obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "maybe'obsoleteOldStyleHistogram" msg) (Field "maybe'obsoleteOldStyleHistogram" msg')

    maybe'simpleValue :: forall msg msg'. HasField "maybe'simpleValue" msg msg' => Lens msg msg' (Field "maybe'simpleValue" msg) (Field "maybe'simpleValue" msg')

    maybe'tensor :: forall msg msg'. HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg')

    min :: forall msg msg'. HasField "min" msg msg' => Lens msg msg' (Field "min" msg) (Field "min" msg')

    nodeName :: forall msg msg'. HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg')

    num :: forall msg msg'. HasField "num" msg msg' => Lens msg msg' (Field "num" msg) (Field "num" msg')

    numChannels :: forall msg msg'. HasField "numChannels" msg msg' => Lens msg msg' (Field "numChannels" msg) (Field "numChannels" msg')

    obsoleteOldStyleHistogram :: forall msg msg'. HasField "obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "obsoleteOldStyleHistogram" msg) (Field "obsoleteOldStyleHistogram" msg')

    sampleRate :: forall msg msg'. HasField "sampleRate" msg msg' => Lens msg msg' (Field "sampleRate" msg) (Field "sampleRate" msg')

    simpleValue :: forall msg msg'. HasField "simpleValue" msg msg' => Lens msg msg' (Field "simpleValue" msg) (Field "simpleValue" msg')

    sum :: forall msg msg'. HasField "sum" msg msg' => Lens msg msg' (Field "sum" msg) (Field "sum" msg')

    sumSquares :: forall msg msg'. HasField "sumSquares" msg msg' => Lens msg msg' (Field "sumSquares" msg) (Field "sumSquares" msg')

    tag :: forall msg msg'. HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg')

    tensor :: forall msg msg'. HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg')

    typeHint :: forall msg msg'. HasField "typeHint" msg msg' => Lens msg msg' (Field "typeHint" msg) (Field "typeHint" msg')

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')

    width :: forall msg msg'. HasField "width" msg msg' => Lens msg msg' (Field "width" msg) (Field "width" msg')
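
    A sketch of the common scalar case: a Summary holding one tagged simpleValue, built with the lenses above (scalarSummary is an illustrative name; def and the operators as in the earlier examples):

        import Data.Default (def)
        import Data.Text (Text)
        import Lens.Family2 ((&), (.~))
        import Proto.Tensorflow.Core.Framework.Summary (Summary, simpleValue, tag, value)

        -- One Summary'Value with tag and simpleValue set; this is the shape
        -- TensorBoard reads for scalar charts.
        scalarSummary :: Text -> Float -> Summary
        scalarSummary t x = def & value .~ [def & tag .~ t & simpleValue .~ x]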

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html
index 9e6b7ca..d6e9f34 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Tensor.html
@@ -1,4 +1,4 @@
    Proto.Tensorflow.Core.Framework.Tensor

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.Tensor

    Documentation

    data TensorProto Source

    Instances

    Eq TensorProto Source 
    Show TensorProto Source 
    Default TensorProto Source 
    Message TensorProto Source 
    HasField "boolVal" TensorProto TensorProto Source 
    HasField "dcomplexVal" TensorProto TensorProto Source 
    HasField "doubleVal" TensorProto TensorProto Source 
    HasField "dtype" TensorProto TensorProto Source 
    HasField "floatVal" TensorProto TensorProto Source 
    HasField "halfVal" TensorProto TensorProto Source 
    HasField "int64Val" TensorProto TensorProto Source 
    HasField "intVal" TensorProto TensorProto Source 
    HasField "maybe'tensorShape" TensorProto TensorProto Source 
    HasField "resourceHandleVal" TensorProto TensorProto Source 
    HasField "scomplexVal" TensorProto TensorProto Source 
    HasField "stringVal" TensorProto TensorProto Source 
    HasField "tensorContent" TensorProto TensorProto Source 
    HasField "tensorShape" TensorProto TensorProto Source 
    HasField "versionNumber" TensorProto TensorProto Source 
    type Field "boolVal" TensorProto = [Bool] Source 
    type Field "dcomplexVal" TensorProto = [Double] Source 
    type Field "doubleVal" TensorProto = [Double] Source 
    type Field "dtype" TensorProto = DataType Source 
    type Field "floatVal" TensorProto = [Float] Source 
    type Field "halfVal" TensorProto = [Int32] Source 
    type Field "int64Val" TensorProto = [Int64] Source 
    type Field "intVal" TensorProto = [Int32] Source 
    type Field "maybe'tensorShape" TensorProto = Maybe TensorShapeProto Source 
    type Field "resourceHandleVal" TensorProto = [ResourceHandle] Source 
    type Field "scomplexVal" TensorProto = [Float] Source 
    type Field "stringVal" TensorProto = [ByteString] Source 
    type Field "tensorContent" TensorProto = ByteString Source 
    type Field "tensorShape" TensorProto = TensorShapeProto Source 
    type Field "versionNumber" TensorProto = Int32 Source 

    boolVal :: forall msg msg'. HasField "boolVal" msg msg' => Lens msg msg' (Field "boolVal" msg) (Field "boolVal" msg') Source

    dcomplexVal :: forall msg msg'. HasField "dcomplexVal" msg msg' => Lens msg msg' (Field "dcomplexVal" msg) (Field "dcomplexVal" msg') Source

    doubleVal :: forall msg msg'. HasField "doubleVal" msg msg' => Lens msg msg' (Field "doubleVal" msg) (Field "doubleVal" msg') Source

    dtype :: forall msg msg'. HasField "dtype" msg msg' => Lens msg msg' (Field "dtype" msg) (Field "dtype" msg') Source

    floatVal :: forall msg msg'. HasField "floatVal" msg msg' => Lens msg msg' (Field "floatVal" msg) (Field "floatVal" msg') Source

    halfVal :: forall msg msg'. HasField "halfVal" msg msg' => Lens msg msg' (Field "halfVal" msg) (Field "halfVal" msg') Source

    int64Val :: forall msg msg'. HasField "int64Val" msg msg' => Lens msg msg' (Field "int64Val" msg) (Field "int64Val" msg') Source

    intVal :: forall msg msg'. HasField "intVal" msg msg' => Lens msg msg' (Field "intVal" msg) (Field "intVal" msg') Source

    maybe'tensorShape :: forall msg msg'. HasField "maybe'tensorShape" msg msg' => Lens msg msg' (Field "maybe'tensorShape" msg) (Field "maybe'tensorShape" msg') Source

    resourceHandleVal :: forall msg msg'. HasField "resourceHandleVal" msg msg' => Lens msg msg' (Field "resourceHandleVal" msg) (Field "resourceHandleVal" msg') Source

    scomplexVal :: forall msg msg'. HasField "scomplexVal" msg msg' => Lens msg msg' (Field "scomplexVal" msg) (Field "scomplexVal" msg') Source

    stringVal :: forall msg msg'. HasField "stringVal" msg msg' => Lens msg msg' (Field "stringVal" msg) (Field "stringVal" msg') Source

    tensorContent :: forall msg msg'. HasField "tensorContent" msg msg' => Lens msg msg' (Field "tensorContent" msg) (Field "tensorContent" msg') Source

    tensorShape :: forall msg msg'. HasField "tensorShape" msg msg' => Lens msg msg' (Field "tensorShape" msg) (Field "tensorShape" msg') Source

    versionNumber :: forall msg msg'. HasField "versionNumber" msg msg' => Lens msg msg' (Field "versionNumber" msg) (Field "versionNumber" msg') Source

    \ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.Tensor

    Documentation

    data TensorProto

    Instances

    Eq TensorProto 
    Show TensorProto 
    Message TensorProto 
    Default TensorProto 
    HasField "boolVal" TensorProto TensorProto 
    HasField "dcomplexVal" TensorProto TensorProto 
    HasField "doubleVal" TensorProto TensorProto 
    HasField "dtype" TensorProto TensorProto 
    HasField "floatVal" TensorProto TensorProto 
    HasField "halfVal" TensorProto TensorProto 
    HasField "int64Val" TensorProto TensorProto 
    HasField "intVal" TensorProto TensorProto 
    HasField "maybe'tensorShape" TensorProto TensorProto 
    HasField "resourceHandleVal" TensorProto TensorProto 
    HasField "scomplexVal" TensorProto TensorProto 
    HasField "stringVal" TensorProto TensorProto 
    HasField "tensorContent" TensorProto TensorProto 
    HasField "tensorShape" TensorProto TensorProto 
    HasField "versionNumber" TensorProto TensorProto 
    type Field "boolVal" TensorProto = [Bool] 
    type Field "dcomplexVal" TensorProto = [Double] 
    type Field "doubleVal" TensorProto = [Double] 
    type Field "dtype" TensorProto = DataType 
    type Field "floatVal" TensorProto = [Float] 
    type Field "halfVal" TensorProto = [Int32] 
    type Field "int64Val" TensorProto = [Int64] 
    type Field "intVal" TensorProto = [Int32] 
    type Field "maybe'tensorShape" TensorProto = Maybe TensorShapeProto 
    type Field "resourceHandleVal" TensorProto = [ResourceHandle] 
    type Field "scomplexVal" TensorProto = [Float] 
    type Field "stringVal" TensorProto = [ByteString] 
    type Field "tensorContent" TensorProto = ByteString 
    type Field "tensorShape" TensorProto = TensorShapeProto 
    type Field "versionNumber" TensorProto = Int32 

    boolVal :: forall msg msg'. HasField "boolVal" msg msg' => Lens msg msg' (Field "boolVal" msg) (Field "boolVal" msg')

    dcomplexVal :: forall msg msg'. HasField "dcomplexVal" msg msg' => Lens msg msg' (Field "dcomplexVal" msg) (Field "dcomplexVal" msg')

    doubleVal :: forall msg msg'. HasField "doubleVal" msg msg' => Lens msg msg' (Field "doubleVal" msg) (Field "doubleVal" msg')

    dtype :: forall msg msg'. HasField "dtype" msg msg' => Lens msg msg' (Field "dtype" msg) (Field "dtype" msg')

    floatVal :: forall msg msg'. HasField "floatVal" msg msg' => Lens msg msg' (Field "floatVal" msg) (Field "floatVal" msg')

    halfVal :: forall msg msg'. HasField "halfVal" msg msg' => Lens msg msg' (Field "halfVal" msg) (Field "halfVal" msg')

    int64Val :: forall msg msg'. HasField "int64Val" msg msg' => Lens msg msg' (Field "int64Val" msg) (Field "int64Val" msg')

    intVal :: forall msg msg'. HasField "intVal" msg msg' => Lens msg msg' (Field "intVal" msg) (Field "intVal" msg')

    maybe'tensorShape :: forall msg msg'. HasField "maybe'tensorShape" msg msg' => Lens msg msg' (Field "maybe'tensorShape" msg) (Field "maybe'tensorShape" msg')

    resourceHandleVal :: forall msg msg'. HasField "resourceHandleVal" msg msg' => Lens msg msg' (Field "resourceHandleVal" msg) (Field "resourceHandleVal" msg')

    scomplexVal :: forall msg msg'. HasField "scomplexVal" msg msg' => Lens msg msg' (Field "scomplexVal" msg) (Field "scomplexVal" msg')

    stringVal :: forall msg msg'. HasField "stringVal" msg msg' => Lens msg msg' (Field "stringVal" msg) (Field "stringVal" msg')

    tensorContent :: forall msg msg'. HasField "tensorContent" msg msg' => Lens msg msg' (Field "tensorContent" msg) (Field "tensorContent" msg')

    tensorShape :: forall msg msg'. HasField "tensorShape" msg msg' => Lens msg msg' (Field "tensorShape" msg) (Field "tensorShape" msg')

    versionNumber :: forall msg msg'. HasField "versionNumber" msg msg' => Lens msg msg' (Field "versionNumber" msg) (Field "versionNumber" msg')
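
    A sketch of encoding a rank-1 float tensor through the repeated floatVal field (tensorContent is the alternative packed encoding). It assumes DataType and its DT_FLOAT constructor are exported from Proto.Tensorflow.Core.Framework.Types, and the dim/size lenses from the TensorShape module below:

        import Data.Default (def)
        import Lens.Family2 ((&), (.~))
        import Proto.Tensorflow.Core.Framework.Tensor (TensorProto, dtype, floatVal, tensorShape)
        import Proto.Tensorflow.Core.Framework.TensorShape (dim, size)
        import Proto.Tensorflow.Core.Framework.Types (DataType (DT_FLOAT))

        -- The float vector [1, 2, 3] with an explicit 1-D shape; writing
        -- through tensorShape sets maybe'tensorShape to Just.
        floatVector :: TensorProto
        floatVector = def
            & dtype .~ DT_FLOAT
            & floatVal .~ [1, 2, 3]
            & tensorShape .~ (def & dim .~ [def & size .~ 3])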

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html
index 6583c2c..f48b230 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-TensorShape.html
@@ -1,4 +1,4 @@
    Proto.Tensorflow.Core.Framework.TensorShape

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.TensorShape

    Documentation

    dim :: forall msg msg'. HasField "dim" msg msg' => Lens msg msg' (Field "dim" msg) (Field "dim" msg') Source

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg') Source

    size :: forall msg msg'. HasField "size" msg msg' => Lens msg msg' (Field "size" msg) (Field "size" msg') Source

    unknownRank :: forall msg msg'. HasField "unknownRank" msg msg' => Lens msg msg' (Field "unknownRank" msg) (Field "unknownRank" msg') Source

    \ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Framework.TensorShape

    Documentation

    dim :: forall msg msg'. HasField "dim" msg msg' => Lens msg msg' (Field "dim" msg) (Field "dim" msg')

    name :: forall msg msg'. HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')

    size :: forall msg msg'. HasField "size" msg msg' => Lens msg msg' (Field "size" msg) (Field "size" msg')

    unknownRank :: forall msg msg'. HasField "unknownRank" msg msg' => Lens msg msg' (Field "unknownRank" msg) (Field "unknownRank" msg')
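
    A read-side sketch (dimSizes is a hypothetical helper; TensorShapeProto is assumed to be exported here, matching the Field types on the Tensor page above):

        import Data.Int (Int64)
        import Lens.Family2 ((^.))
        import Proto.Tensorflow.Core.Framework.TensorShape (TensorShapeProto, dim, size)

        -- Collect the per-dimension sizes; TensorFlow uses -1 for a
        -- dimension whose size is unknown.
        dimSizes :: TensorShapeProto -> [Int64]
        dimSizes s = [d ^. size | d <- s ^. dim]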

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html
index c3be1b0..c0c81dd 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Framework-Types.html
@@ -1,4 +1,4 @@
    Proto.Tensorflow.Core.Framework.Types

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html
index 3a1c6e5..e8baf38 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Protobuf-Config.html
@@ -1,4 +1,4 @@
    Proto.Tensorflow.Core.Protobuf.Config

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Protobuf.Config

    Documentation

    data ConfigProto Source

    Instances

    Eq ConfigProto Source 
    Show ConfigProto Source 
    Default ConfigProto Source 
    Message ConfigProto Source 
    HasField "allowSoftPlacement" ConfigProto ConfigProto Source 
    HasField "deviceCount" ConfigProto ConfigProto Source 
    HasField "deviceFilters" ConfigProto ConfigProto Source 
    HasField "gpuOptions" ConfigProto ConfigProto Source 
    HasField "graphOptions" ConfigProto ConfigProto Source 
    HasField "interOpParallelismThreads" ConfigProto ConfigProto Source 
    HasField "intraOpParallelismThreads" ConfigProto ConfigProto Source 
    HasField "logDevicePlacement" ConfigProto ConfigProto Source 
    HasField "maybe'gpuOptions" ConfigProto ConfigProto Source 
    HasField "maybe'graphOptions" ConfigProto ConfigProto Source 
    HasField "operationTimeoutInMs" ConfigProto ConfigProto Source 
    HasField "placementPeriod" ConfigProto ConfigProto Source 
    HasField "sessionInterOpThreadPool" ConfigProto ConfigProto Source 
    HasField "usePerSessionThreads" ConfigProto ConfigProto Source 
    type Field "allowSoftPlacement" ConfigProto = Bool Source 
    type Field "deviceCount" ConfigProto = Map Text Int32 Source 
    type Field "deviceFilters" ConfigProto = [Text] Source 
    type Field "gpuOptions" ConfigProto = GPUOptions Source 
    type Field "graphOptions" ConfigProto = GraphOptions Source 
    type Field "interOpParallelismThreads" ConfigProto = Int32 Source 
    type Field "intraOpParallelismThreads" ConfigProto = Int32 Source 
    type Field "logDevicePlacement" ConfigProto = Bool Source 
    type Field "maybe'gpuOptions" ConfigProto = Maybe GPUOptions Source 
    type Field "maybe'graphOptions" ConfigProto = Maybe GraphOptions Source 
    type Field "operationTimeoutInMs" ConfigProto = Int64 Source 
    type Field "placementPeriod" ConfigProto = Int32 Source 
    type Field "sessionInterOpThreadPool" ConfigProto = [ThreadPoolOptionProto] Source 
    type Field "usePerSessionThreads" ConfigProto = Bool Source 

    data GraphOptions Source

    Instances

    Eq GraphOptions Source 
    Show GraphOptions Source 
    Default GraphOptions Source 
    Message GraphOptions Source 
    HasField "buildCostModel" GraphOptions GraphOptions Source 
    HasField "buildCostModelAfter" GraphOptions GraphOptions Source 
    HasField "enableBfloat16Sendrecv" GraphOptions GraphOptions Source 
    HasField "enableRecvScheduling" GraphOptions GraphOptions Source 
    HasField "inferShapes" GraphOptions GraphOptions Source 
    HasField "maybe'optimizerOptions" GraphOptions GraphOptions Source 
    HasField "optimizerOptions" GraphOptions GraphOptions Source 
    HasField "placePrunedGraph" GraphOptions GraphOptions Source 
    HasField "timelineStep" GraphOptions GraphOptions Source 
    type Field "buildCostModel" GraphOptions = Int64 Source 
    type Field "buildCostModelAfter" GraphOptions = Int64 Source 
    type Field "enableBfloat16Sendrecv" GraphOptions = Bool Source 
    type Field "enableRecvScheduling" GraphOptions = Bool Source 
    type Field "inferShapes" GraphOptions = Bool Source 
    type Field "maybe'optimizerOptions" GraphOptions = Maybe OptimizerOptions Source 
    type Field "optimizerOptions" GraphOptions = OptimizerOptions Source 
    type Field "placePrunedGraph" GraphOptions = Bool Source 
    type Field "timelineStep" GraphOptions = Int32 Source 

    allocatorType :: forall msg msg'. HasField "allocatorType" msg msg' => Lens msg msg' (Field "allocatorType" msg) (Field "allocatorType" msg') Source

    allowGrowth :: forall msg msg'. HasField "allowGrowth" msg msg' => Lens msg msg' (Field "allowGrowth" msg) (Field "allowGrowth" msg') Source

    allowSoftPlacement :: forall msg msg'. HasField "allowSoftPlacement" msg msg' => Lens msg msg' (Field "allowSoftPlacement" msg) (Field "allowSoftPlacement" msg') Source

    buildCostModel :: forall msg msg'. HasField "buildCostModel" msg msg' => Lens msg msg' (Field "buildCostModel" msg) (Field "buildCostModel" msg') Source

    buildCostModelAfter :: forall msg msg'. HasField "buildCostModelAfter" msg msg' => Lens msg msg' (Field "buildCostModelAfter" msg) (Field "buildCostModelAfter" msg') Source

    costGraph :: forall msg msg'. HasField "costGraph" msg msg' => Lens msg msg' (Field "costGraph" msg) (Field "costGraph" msg') Source

    debugOps :: forall msg msg'. HasField "debugOps" msg msg' => Lens msg msg' (Field "debugOps" msg) (Field "debugOps" msg') Source

    debugTensorWatchOpts :: forall msg msg'. HasField "debugTensorWatchOpts" msg msg' => Lens msg msg' (Field "debugTensorWatchOpts" msg) (Field "debugTensorWatchOpts" msg') Source

    debugUrls :: forall msg msg'. HasField "debugUrls" msg msg' => Lens msg msg' (Field "debugUrls" msg) (Field "debugUrls" msg') Source

    deferredDeletionBytes :: forall msg msg'. HasField "deferredDeletionBytes" msg msg' => Lens msg msg' (Field "deferredDeletionBytes" msg) (Field "deferredDeletionBytes" msg') Source

    deviceCount :: forall msg msg'. HasField "deviceCount" msg msg' => Lens msg msg' (Field "deviceCount" msg) (Field "deviceCount" msg') Source

    deviceFilters :: forall msg msg'. HasField "deviceFilters" msg msg' => Lens msg msg' (Field "deviceFilters" msg) (Field "deviceFilters" msg') Source

    doCommonSubexpressionElimination :: forall msg msg'. HasField "doCommonSubexpressionElimination" msg msg' => Lens msg msg' (Field "doCommonSubexpressionElimination" msg) (Field "doCommonSubexpressionElimination" msg') Source

    doConstantFolding :: forall msg msg'. HasField "doConstantFolding" msg msg' => Lens msg msg' (Field "doConstantFolding" msg) (Field "doConstantFolding" msg') Source

    doFunctionInlining :: forall msg msg'. HasField "doFunctionInlining" msg msg' => Lens msg msg' (Field "doFunctionInlining" msg) (Field "doFunctionInlining" msg') Source

    enableBfloat16Sendrecv :: forall msg msg'. HasField "enableBfloat16Sendrecv" msg msg' => Lens msg msg' (Field "enableBfloat16Sendrecv" msg) (Field "enableBfloat16Sendrecv" msg') Source

    enableRecvScheduling :: forall msg msg'. HasField "enableRecvScheduling" msg msg' => Lens msg msg' (Field "enableRecvScheduling" msg) (Field "enableRecvScheduling" msg') Source

    gpuOptions :: forall msg msg'. HasField "gpuOptions" msg msg' => Lens msg msg' (Field "gpuOptions" msg) (Field "gpuOptions" msg') Source

    graphOptions :: forall msg msg'. HasField "graphOptions" msg msg' => Lens msg msg' (Field "graphOptions" msg) (Field "graphOptions" msg') Source

    inferShapes :: forall msg msg'. HasField "inferShapes" msg msg' => Lens msg msg' (Field "inferShapes" msg) (Field "inferShapes" msg') Source

    interOpParallelismThreads :: forall msg msg'. HasField "interOpParallelismThreads" msg msg' => Lens msg msg' (Field "interOpParallelismThreads" msg) (Field "interOpParallelismThreads" msg') Source

    interOpThreadPool :: forall msg msg'. HasField "interOpThreadPool" msg msg' => Lens msg msg' (Field "interOpThreadPool" msg) (Field "interOpThreadPool" msg') Source

    intraOpParallelismThreads :: forall msg msg'. HasField "intraOpParallelismThreads" msg msg' => Lens msg msg' (Field "intraOpParallelismThreads" msg) (Field "intraOpParallelismThreads" msg') Source

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') Source

    logDevicePlacement :: forall msg msg'. HasField "logDevicePlacement" msg msg' => Lens msg msg' (Field "logDevicePlacement" msg) (Field "logDevicePlacement" msg') Source

    maybe'costGraph :: forall msg msg'. HasField "maybe'costGraph" msg msg' => Lens msg msg' (Field "maybe'costGraph" msg) (Field "maybe'costGraph" msg') Source

    maybe'gpuOptions :: forall msg msg'. HasField "maybe'gpuOptions" msg msg' => Lens msg msg' (Field "maybe'gpuOptions" msg) (Field "maybe'gpuOptions" msg') Source

    maybe'graphOptions :: forall msg msg'. HasField "maybe'graphOptions" msg msg' => Lens msg msg' (Field "maybe'graphOptions" msg) (Field "maybe'graphOptions" msg') Source

    maybe'optimizerOptions :: forall msg msg'. HasField "maybe'optimizerOptions" msg msg' => Lens msg msg' (Field "maybe'optimizerOptions" msg) (Field "maybe'optimizerOptions" msg') Source

    maybe'stepStats :: forall msg msg'. HasField "maybe'stepStats" msg msg' => Lens msg msg' (Field "maybe'stepStats" msg) (Field "maybe'stepStats" msg') Source

    nodeName :: forall msg msg'. HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg') Source

    numThreads :: forall msg msg'. HasField "numThreads" msg msg' => Lens msg msg' (Field "numThreads" msg) (Field "numThreads" msg') Source

    operationTimeoutInMs :: forall msg msg'. HasField "operationTimeoutInMs" msg msg' => Lens msg msg' (Field "operationTimeoutInMs" msg) (Field "operationTimeoutInMs" msg') Source

    optLevel :: forall msg msg'. HasField "optLevel" msg msg' => Lens msg msg' (Field "optLevel" msg) (Field "optLevel" msg') Source

    optimizerOptions :: forall msg msg'. HasField "optimizerOptions" msg msg' => Lens msg msg' (Field "optimizerOptions" msg) (Field "optimizerOptions" msg') Source

    outputPartitionGraphs :: forall msg msg'. HasField "outputPartitionGraphs" msg msg' => Lens msg msg' (Field "outputPartitionGraphs" msg) (Field "outputPartitionGraphs" msg') Source

    outputSlot :: forall msg msg'. HasField "outputSlot" msg msg' => Lens msg msg' (Field "outputSlot" msg) (Field "outputSlot" msg') Source

    partitionGraphs :: forall msg msg'. HasField "partitionGraphs" msg msg' => Lens msg msg' (Field "partitionGraphs" msg) (Field "partitionGraphs" msg') Source

    perProcessGpuMemoryFraction :: forall msg msg'. HasField "perProcessGpuMemoryFraction" msg msg' => Lens msg msg' (Field "perProcessGpuMemoryFraction" msg) (Field "perProcessGpuMemoryFraction" msg') Source

    placePrunedGraph :: forall msg msg'. HasField "placePrunedGraph" msg msg' => Lens msg msg' (Field "placePrunedGraph" msg) (Field "placePrunedGraph" msg') Source

    placementPeriod :: forall msg msg'. HasField "placementPeriod" msg msg' => Lens msg msg' (Field "placementPeriod" msg) (Field "placementPeriod" msg') Source

    sessionInterOpThreadPool :: forall msg msg'. HasField "sessionInterOpThreadPool" msg msg' => Lens msg msg' (Field "sessionInterOpThreadPool" msg) (Field "sessionInterOpThreadPool" msg') Source

    stepStats :: forall msg msg'. HasField "stepStats" msg msg' => Lens msg msg' (Field "stepStats" msg) (Field "stepStats" msg') Source

    timelineStep :: forall msg msg'. HasField "timelineStep" msg msg' => Lens msg msg' (Field "timelineStep" msg) (Field "timelineStep" msg') Source

    timeoutInMs :: forall msg msg'. HasField "timeoutInMs" msg msg' => Lens msg msg' (Field "timeoutInMs" msg) (Field "timeoutInMs" msg') Source

    traceLevel :: forall msg msg'. HasField "traceLevel" msg msg' => Lens msg msg' (Field "traceLevel" msg) (Field "traceLevel" msg') Source

    usePerSessionThreads :: forall msg msg'. HasField "usePerSessionThreads" msg msg' => Lens msg msg' (Field "usePerSessionThreads" msg) (Field "usePerSessionThreads" msg') Source

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') Source

    visibleDeviceList :: forall msg msg'. HasField "visibleDeviceList" msg msg' => Lens msg msg' (Field "visibleDeviceList" msg) (Field "visibleDeviceList" msg') Source

    \ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Protobuf.Config

    Documentation

    data ConfigProto

    Instances

    Eq ConfigProto 
    Show ConfigProto 
    Message ConfigProto 
    Default ConfigProto 
    HasField "allowSoftPlacement" ConfigProto ConfigProto 
    HasField "deviceCount" ConfigProto ConfigProto 
    HasField "deviceFilters" ConfigProto ConfigProto 
    HasField "gpuOptions" ConfigProto ConfigProto 
    HasField "graphOptions" ConfigProto ConfigProto 
    HasField "interOpParallelismThreads" ConfigProto ConfigProto 
    HasField "intraOpParallelismThreads" ConfigProto ConfigProto 
    HasField "logDevicePlacement" ConfigProto ConfigProto 
    HasField "maybe'gpuOptions" ConfigProto ConfigProto 
    HasField "maybe'graphOptions" ConfigProto ConfigProto 
    HasField "maybe'rpcOptions" ConfigProto ConfigProto 
    HasField "operationTimeoutInMs" ConfigProto ConfigProto 
    HasField "placementPeriod" ConfigProto ConfigProto 
    HasField "rpcOptions" ConfigProto ConfigProto 
    HasField "sessionInterOpThreadPool" ConfigProto ConfigProto 
    HasField "usePerSessionThreads" ConfigProto ConfigProto 
    type Field "allowSoftPlacement" ConfigProto = Bool 
    type Field "deviceCount" ConfigProto = Map Text Int32 
    type Field "deviceFilters" ConfigProto = [Text] 
    type Field "gpuOptions" ConfigProto = GPUOptions 
    type Field "graphOptions" ConfigProto = GraphOptions 
    type Field "interOpParallelismThreads" ConfigProto = Int32 
    type Field "intraOpParallelismThreads" ConfigProto = Int32 
    type Field "logDevicePlacement" ConfigProto = Bool 
    type Field "maybe'gpuOptions" ConfigProto = Maybe GPUOptions 
    type Field "maybe'graphOptions" ConfigProto = Maybe GraphOptions 
    type Field "maybe'rpcOptions" ConfigProto = Maybe RPCOptions 
    type Field "operationTimeoutInMs" ConfigProto = Int64 
    type Field "placementPeriod" ConfigProto = Int32 
    type Field "rpcOptions" ConfigProto = RPCOptions 
    type Field "sessionInterOpThreadPool" ConfigProto = [ThreadPoolOptionProto] 
    type Field "usePerSessionThreads" ConfigProto = Bool 

    data GPUOptions

    Instances

    Eq GPUOptions 
    Show GPUOptions 
    Message GPUOptions 
    Default GPUOptions 
    HasField "allocatorType" GPUOptions GPUOptions 
    HasField "allowGrowth" GPUOptions GPUOptions 
    HasField "deferredDeletionBytes" GPUOptions GPUOptions 
    HasField "perProcessGpuMemoryFraction" GPUOptions GPUOptions 
    HasField "visibleDeviceList" GPUOptions GPUOptions 
    type Field "allocatorType" GPUOptions = Text 
    type Field "allowGrowth" GPUOptions = Bool 
    type Field "deferredDeletionBytes" GPUOptions = Int64 
    type Field "perProcessGpuMemoryFraction" GPUOptions = Double 
    type Field "visibleDeviceList" GPUOptions = Text 

    data GraphOptions

    Instances

    Eq GraphOptions 
    Show GraphOptions 
    Message GraphOptions 
    Default GraphOptions 
    HasField "buildCostModel" GraphOptions GraphOptions 
    HasField "buildCostModelAfter" GraphOptions GraphOptions 
    HasField "enableBfloat16Sendrecv" GraphOptions GraphOptions 
    HasField "enableRecvScheduling" GraphOptions GraphOptions 
    HasField "inferShapes" GraphOptions GraphOptions 
    HasField "maybe'optimizerOptions" GraphOptions GraphOptions 
    HasField "optimizerOptions" GraphOptions GraphOptions 
    HasField "placePrunedGraph" GraphOptions GraphOptions 
    HasField "timelineStep" GraphOptions GraphOptions 
    type Field "buildCostModel" GraphOptions = Int64 
    type Field "buildCostModelAfter" GraphOptions = Int64 
    type Field "enableBfloat16Sendrecv" GraphOptions = Bool 
    type Field "enableRecvScheduling" GraphOptions = Bool 
    type Field "inferShapes" GraphOptions = Bool 
    type Field "maybe'optimizerOptions" GraphOptions = Maybe OptimizerOptions 
    type Field "optimizerOptions" GraphOptions = OptimizerOptions 
    type Field "placePrunedGraph" GraphOptions = Bool 
    type Field "timelineStep" GraphOptions = Int32 

    data OptimizerOptions

    Instances

    Eq OptimizerOptions 
    Show OptimizerOptions 
    Message OptimizerOptions 
    Default OptimizerOptions 
    HasField "doCommonSubexpressionElimination" OptimizerOptions OptimizerOptions 
    HasField "doConstantFolding" OptimizerOptions OptimizerOptions 
    HasField "doFunctionInlining" OptimizerOptions OptimizerOptions 
    HasField "globalJitLevel" OptimizerOptions OptimizerOptions 
    HasField "optLevel" OptimizerOptions OptimizerOptions 
    type Field "doCommonSubexpressionElimination" OptimizerOptions = Bool 
    type Field "doConstantFolding" OptimizerOptions = Bool 
    type Field "doFunctionInlining" OptimizerOptions = Bool 
    type Field "globalJitLevel" OptimizerOptions = OptimizerOptions'GlobalJitLevel 
    type Field "optLevel" OptimizerOptions = OptimizerOptions'Level 

    data RPCOptions

    Instances

    Eq RPCOptions 
    Show RPCOptions 
    Message RPCOptions 
    Default RPCOptions 
    HasField "useRpcForInprocessMaster" RPCOptions RPCOptions 
    type Field "useRpcForInprocessMaster" RPCOptions = Bool 

    data RunMetadata

    Constructors

    RunMetadata 

    Instances

    Eq RunMetadata 
    Show RunMetadata 
    Message RunMetadata 
    Default RunMetadata 
    HasField "costGraph" RunMetadata RunMetadata 
    HasField "maybe'costGraph" RunMetadata RunMetadata 
    HasField "maybe'stepStats" RunMetadata RunMetadata 
    HasField "partitionGraphs" RunMetadata RunMetadata 
    HasField "stepStats" RunMetadata RunMetadata 
    type Field "costGraph" RunMetadata 
    type Field "maybe'costGraph" RunMetadata 
    type Field "maybe'stepStats" RunMetadata 
    type Field "partitionGraphs" RunMetadata = [GraphDef] 
    type Field "stepStats" RunMetadata 

    data RunOptions

    Instances

    Eq RunOptions 
    Show RunOptions 
    Message RunOptions 
    Default RunOptions 
    HasField "debugOptions" RunOptions RunOptions 
    HasField "interOpThreadPool" RunOptions RunOptions 
    HasField "maybe'debugOptions" RunOptions RunOptions 
    HasField "outputPartitionGraphs" RunOptions RunOptions 
    HasField "timeoutInMs" RunOptions RunOptions 
    HasField "traceLevel" RunOptions RunOptions 
    type Field "debugOptions" RunOptions 
    type Field "interOpThreadPool" RunOptions = Int32 
    type Field "maybe'debugOptions" RunOptions 
    type Field "outputPartitionGraphs" RunOptions = Bool 
    type Field "timeoutInMs" RunOptions = Int64 
    type Field "traceLevel" RunOptions = RunOptions'TraceLevel 

    allocatorType :: forall msg msg'. HasField "allocatorType" msg msg' => Lens msg msg' (Field "allocatorType" msg) (Field "allocatorType" msg')

    allowGrowth :: forall msg msg'. HasField "allowGrowth" msg msg' => Lens msg msg' (Field "allowGrowth" msg) (Field "allowGrowth" msg')

    allowSoftPlacement :: forall msg msg'. HasField "allowSoftPlacement" msg msg' => Lens msg msg' (Field "allowSoftPlacement" msg) (Field "allowSoftPlacement" msg')

    buildCostModel :: forall msg msg'. HasField "buildCostModel" msg msg' => Lens msg msg' (Field "buildCostModel" msg) (Field "buildCostModel" msg')

    buildCostModelAfter :: forall msg msg'. HasField "buildCostModelAfter" msg msg' => Lens msg msg' (Field "buildCostModelAfter" msg) (Field "buildCostModelAfter" msg')

    costGraph :: forall msg msg'. HasField "costGraph" msg msg' => Lens msg msg' (Field "costGraph" msg) (Field "costGraph" msg')

    debugOptions :: forall msg msg'. HasField "debugOptions" msg msg' => Lens msg msg' (Field "debugOptions" msg) (Field "debugOptions" msg')

    deferredDeletionBytes :: forall msg msg'. HasField "deferredDeletionBytes" msg msg' => Lens msg msg' (Field "deferredDeletionBytes" msg) (Field "deferredDeletionBytes" msg')

    deviceCount :: forall msg msg'. HasField "deviceCount" msg msg' => Lens msg msg' (Field "deviceCount" msg) (Field "deviceCount" msg')

    deviceFilters :: forall msg msg'. HasField "deviceFilters" msg msg' => Lens msg msg' (Field "deviceFilters" msg) (Field "deviceFilters" msg')

    doCommonSubexpressionElimination :: forall msg msg'. HasField "doCommonSubexpressionElimination" msg msg' => Lens msg msg' (Field "doCommonSubexpressionElimination" msg) (Field "doCommonSubexpressionElimination" msg')

    doConstantFolding :: forall msg msg'. HasField "doConstantFolding" msg msg' => Lens msg msg' (Field "doConstantFolding" msg) (Field "doConstantFolding" msg')

    doFunctionInlining :: forall msg msg'. HasField "doFunctionInlining" msg msg' => Lens msg msg' (Field "doFunctionInlining" msg) (Field "doFunctionInlining" msg')

    enableBfloat16Sendrecv :: forall msg msg'. HasField "enableBfloat16Sendrecv" msg msg' => Lens msg msg' (Field "enableBfloat16Sendrecv" msg) (Field "enableBfloat16Sendrecv" msg')

    enableRecvScheduling :: forall msg msg'. HasField "enableRecvScheduling" msg msg' => Lens msg msg' (Field "enableRecvScheduling" msg) (Field "enableRecvScheduling" msg')

    globalJitLevel :: forall msg msg'. HasField "globalJitLevel" msg msg' => Lens msg msg' (Field "globalJitLevel" msg) (Field "globalJitLevel" msg')

    gpuOptions :: forall msg msg'. HasField "gpuOptions" msg msg' => Lens msg msg' (Field "gpuOptions" msg) (Field "gpuOptions" msg')

    graphOptions :: forall msg msg'. HasField "graphOptions" msg msg' => Lens msg msg' (Field "graphOptions" msg) (Field "graphOptions" msg')

    inferShapes :: forall msg msg'. HasField "inferShapes" msg msg' => Lens msg msg' (Field "inferShapes" msg) (Field "inferShapes" msg')

    interOpParallelismThreads :: forall msg msg'. HasField "interOpParallelismThreads" msg msg' => Lens msg msg' (Field "interOpParallelismThreads" msg) (Field "interOpParallelismThreads" msg')

    interOpThreadPool :: forall msg msg'. HasField "interOpThreadPool" msg msg' => Lens msg msg' (Field "interOpThreadPool" msg) (Field "interOpThreadPool" msg')

    intraOpParallelismThreads :: forall msg msg'. HasField "intraOpParallelismThreads" msg msg' => Lens msg msg' (Field "intraOpParallelismThreads" msg) (Field "intraOpParallelismThreads" msg')

    key :: forall msg msg'. HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg')

    logDevicePlacement :: forall msg msg'. HasField "logDevicePlacement" msg msg' => Lens msg msg' (Field "logDevicePlacement" msg) (Field "logDevicePlacement" msg')

    maybe'costGraph :: forall msg msg'. HasField "maybe'costGraph" msg msg' => Lens msg msg' (Field "maybe'costGraph" msg) (Field "maybe'costGraph" msg')

    maybe'debugOptions :: forall msg msg'. HasField "maybe'debugOptions" msg msg' => Lens msg msg' (Field "maybe'debugOptions" msg) (Field "maybe'debugOptions" msg')

    maybe'gpuOptions :: forall msg msg'. HasField "maybe'gpuOptions" msg msg' => Lens msg msg' (Field "maybe'gpuOptions" msg) (Field "maybe'gpuOptions" msg')

    maybe'graphOptions :: forall msg msg'. HasField "maybe'graphOptions" msg msg' => Lens msg msg' (Field "maybe'graphOptions" msg) (Field "maybe'graphOptions" msg')

    maybe'optimizerOptions :: forall msg msg'. HasField "maybe'optimizerOptions" msg msg' => Lens msg msg' (Field "maybe'optimizerOptions" msg) (Field "maybe'optimizerOptions" msg')

    maybe'rpcOptions :: forall msg msg'. HasField "maybe'rpcOptions" msg msg' => Lens msg msg' (Field "maybe'rpcOptions" msg) (Field "maybe'rpcOptions" msg')

    maybe'stepStats :: forall msg msg'. HasField "maybe'stepStats" msg msg' => Lens msg msg' (Field "maybe'stepStats" msg) (Field "maybe'stepStats" msg')

    numThreads :: forall msg msg'. HasField "numThreads" msg msg' => Lens msg msg' (Field "numThreads" msg) (Field "numThreads" msg')

    operationTimeoutInMs :: forall msg msg'. HasField "operationTimeoutInMs" msg msg' => Lens msg msg' (Field "operationTimeoutInMs" msg) (Field "operationTimeoutInMs" msg')

    optLevel :: forall msg msg'. HasField "optLevel" msg msg' => Lens msg msg' (Field "optLevel" msg) (Field "optLevel" msg')

    optimizerOptions :: forall msg msg'. HasField "optimizerOptions" msg msg' => Lens msg msg' (Field "optimizerOptions" msg) (Field "optimizerOptions" msg')

    outputPartitionGraphs :: forall msg msg'. HasField "outputPartitionGraphs" msg msg' => Lens msg msg' (Field "outputPartitionGraphs" msg) (Field "outputPartitionGraphs" msg')

    partitionGraphs :: forall msg msg'. HasField "partitionGraphs" msg msg' => Lens msg msg' (Field "partitionGraphs" msg) (Field "partitionGraphs" msg')

    perProcessGpuMemoryFraction :: forall msg msg'. HasField "perProcessGpuMemoryFraction" msg msg' => Lens msg msg' (Field "perProcessGpuMemoryFraction" msg) (Field "perProcessGpuMemoryFraction" msg')

    placePrunedGraph :: forall msg msg'. HasField "placePrunedGraph" msg msg' => Lens msg msg' (Field "placePrunedGraph" msg) (Field "placePrunedGraph" msg')

    placementPeriod :: forall msg msg'. HasField "placementPeriod" msg msg' => Lens msg msg' (Field "placementPeriod" msg) (Field "placementPeriod" msg')

    rpcOptions :: forall msg msg'. HasField "rpcOptions" msg msg' => Lens msg msg' (Field "rpcOptions" msg) (Field "rpcOptions" msg')

    sessionInterOpThreadPool :: forall msg msg'. HasField "sessionInterOpThreadPool" msg msg' => Lens msg msg' (Field "sessionInterOpThreadPool" msg) (Field "sessionInterOpThreadPool" msg')

    stepStats :: forall msg msg'. HasField "stepStats" msg msg' => Lens msg msg' (Field "stepStats" msg) (Field "stepStats" msg')

    timelineStep :: forall msg msg'. HasField "timelineStep" msg msg' => Lens msg msg' (Field "timelineStep" msg) (Field "timelineStep" msg')

    timeoutInMs :: forall msg msg'. HasField "timeoutInMs" msg msg' => Lens msg msg' (Field "timeoutInMs" msg) (Field "timeoutInMs" msg')

    traceLevel :: forall msg msg'. HasField "traceLevel" msg msg' => Lens msg msg' (Field "traceLevel" msg) (Field "traceLevel" msg')

    usePerSessionThreads :: forall msg msg'. HasField "usePerSessionThreads" msg msg' => Lens msg msg' (Field "usePerSessionThreads" msg) (Field "usePerSessionThreads" msg')

    useRpcForInprocessMaster :: forall msg msg'. HasField "useRpcForInprocessMaster" msg msg' => Lens msg msg' (Field "useRpcForInprocessMaster" msg) (Field "useRpcForInprocessMaster" msg')

    value :: forall msg msg'. HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')

    visibleDeviceList :: forall msg msg'. HasField "visibleDeviceList" msg msg' => Lens msg msg' (Field "visibleDeviceList" msg) (Field "visibleDeviceList" msg')
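
    Note that each of these lenses is polymorphic in the message: timeoutInMs works on any msg with a HasField "timeoutInMs" instance, not just RunOptions. And because every type here is an instance of Message, a configured message can be serialized. A sketch, under the same imports as the earlier examples plus encodeMessage, assumed to come from Data.ProtoLens:

    import Data.ByteString (ByteString)
    import Data.Int (Int64)
    import Data.ProtoLens (encodeMessage)

    -- Works for any msg with the field; here instantiated at RunOptions.
    withTimeout :: Int64 -> RunOptions -> RunOptions
    withTimeout ms ro = ro & timeoutInMs .~ ms

    serializeConfig :: ConfigProto -> ByteString
    serializeConfig = encodeMessage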

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-Event.html b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-Event.html new file mode 100644 index 0000000..123009a --- /dev/null +++ b/docs/haddock/tensorflow-proto-0.1.0.0/Proto-Tensorflow-Core-Util-Event.html @@ -0,0 +1,4 @@ +Proto.Tensorflow.Core.Util.Event

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Safe Haskell: None
    Language: Haskell2010

    Proto.Tensorflow.Core.Util.Event

    Documentation

    data Event

    Instances

    Eq Event 
    Show Event 
    Message Event 
    Default Event 
    HasField "fileVersion" Event Event 
    HasField "graphDef" Event Event 
    HasField "logMessage" Event Event 
    HasField "maybe'fileVersion" Event Event 
    HasField "maybe'graphDef" Event Event 
    HasField "maybe'logMessage" Event Event 
    HasField "maybe'metaGraphDef" Event Event 
    HasField "maybe'sessionLog" Event Event 
    HasField "maybe'summary" Event Event 
    HasField "maybe'taggedRunMetadata" Event Event 
    HasField "metaGraphDef" Event Event 
    HasField "sessionLog" Event Event 
    HasField "step" Event Event 
    HasField "summary" Event Event 
    HasField "taggedRunMetadata" Event Event 
    HasField "wallTime" Event Event 
    type Field "fileVersion" Event = Text 
    type Field "graphDef" Event = ByteString 
    type Field "logMessage" Event = LogMessage 
    type Field "maybe'fileVersion" Event = Maybe Text 
    type Field "maybe'graphDef" Event = Maybe ByteString 
    type Field "maybe'logMessage" Event = Maybe LogMessage 
    type Field "maybe'metaGraphDef" Event = Maybe ByteString 
    type Field "maybe'sessionLog" Event = Maybe SessionLog 
    type Field "maybe'summary" Event = Maybe Summary 
    type Field "maybe'taggedRunMetadata" Event = Maybe TaggedRunMetadata 
    type Field "metaGraphDef" Event = ByteString 
    type Field "sessionLog" Event = SessionLog 
    type Field "step" Event = Int64 
    type Field "summary" Event = Summary 
    type Field "taggedRunMetadata" Event = TaggedRunMetadata 
    type Field "wallTime" Event = Double 

    data LogMessage

    Instances

    Eq LogMessage 
    Show LogMessage 
    Message LogMessage 
    Default LogMessage 
    HasField "level" LogMessage LogMessage 
    HasField "message" LogMessage LogMessage 
    type Field "level" LogMessage = LogMessage'Level 
    type Field "message" LogMessage = Text 

    data SessionLog

    Instances

    Eq SessionLog 
    Show SessionLog 
    Message SessionLog 
    Default SessionLog 
    HasField "checkpointPath" SessionLog SessionLog 
    HasField "msg" SessionLog SessionLog 
    HasField "status" SessionLog SessionLog 
    type Field "checkpointPath" SessionLog = Text 
    type Field "msg" SessionLog = Text 
    type Field "status" SessionLog = SessionLog'SessionStatus 

    checkpointPath :: forall msg msg'. HasField "checkpointPath" msg msg' => Lens msg msg' (Field "checkpointPath" msg) (Field "checkpointPath" msg')

    fileVersion :: forall msg msg'. HasField "fileVersion" msg msg' => Lens msg msg' (Field "fileVersion" msg) (Field "fileVersion" msg')

    graphDef :: forall msg msg'. HasField "graphDef" msg msg' => Lens msg msg' (Field "graphDef" msg) (Field "graphDef" msg')

    level :: forall msg msg'. HasField "level" msg msg' => Lens msg msg' (Field "level" msg) (Field "level" msg')

    logMessage :: forall msg msg'. HasField "logMessage" msg msg' => Lens msg msg' (Field "logMessage" msg) (Field "logMessage" msg')

    maybe'fileVersion :: forall msg msg'. HasField "maybe'fileVersion" msg msg' => Lens msg msg' (Field "maybe'fileVersion" msg) (Field "maybe'fileVersion" msg')

    maybe'graphDef :: forall msg msg'. HasField "maybe'graphDef" msg msg' => Lens msg msg' (Field "maybe'graphDef" msg) (Field "maybe'graphDef" msg')

    maybe'logMessage :: forall msg msg'. HasField "maybe'logMessage" msg msg' => Lens msg msg' (Field "maybe'logMessage" msg) (Field "maybe'logMessage" msg')

    maybe'metaGraphDef :: forall msg msg'. HasField "maybe'metaGraphDef" msg msg' => Lens msg msg' (Field "maybe'metaGraphDef" msg) (Field "maybe'metaGraphDef" msg')

    maybe'sessionLog :: forall msg msg'. HasField "maybe'sessionLog" msg msg' => Lens msg msg' (Field "maybe'sessionLog" msg) (Field "maybe'sessionLog" msg')

    maybe'summary :: forall msg msg'. HasField "maybe'summary" msg msg' => Lens msg msg' (Field "maybe'summary" msg) (Field "maybe'summary" msg')

    maybe'taggedRunMetadata :: forall msg msg'. HasField "maybe'taggedRunMetadata" msg msg' => Lens msg msg' (Field "maybe'taggedRunMetadata" msg) (Field "maybe'taggedRunMetadata" msg')

    message :: forall msg msg'. HasField "message" msg msg' => Lens msg msg' (Field "message" msg) (Field "message" msg')

    metaGraphDef :: forall msg msg'. HasField "metaGraphDef" msg msg' => Lens msg msg' (Field "metaGraphDef" msg) (Field "metaGraphDef" msg')

    msg :: forall msg msg'. HasField "msg" msg msg' => Lens msg msg' (Field "msg" msg) (Field "msg" msg')

    runMetadata :: forall msg msg'. HasField "runMetadata" msg msg' => Lens msg msg' (Field "runMetadata" msg) (Field "runMetadata" msg')

    sessionLog :: forall msg msg'. HasField "sessionLog" msg msg' => Lens msg msg' (Field "sessionLog" msg) (Field "sessionLog" msg')

    status :: forall msg msg'. HasField "status" msg msg' => Lens msg msg' (Field "status" msg) (Field "status" msg')

    step :: forall msg msg'. HasField "step" msg msg' => Lens msg msg' (Field "step" msg) (Field "step" msg')

    summary :: forall msg msg'. HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg')

    tag :: forall msg msg'. HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg')

    taggedRunMetadata :: forall msg msg'. HasField "taggedRunMetadata" msg msg' => Lens msg msg' (Field "taggedRunMetadata" msg) (Field "taggedRunMetadata" msg')

    wallTime :: forall msg msg'. HasField "wallTime" msg msg' => Lens msg msg' (Field "wallTime" msg) (Field "wallTime" msg')
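
    Going the other way, the Message instance decodes raw bytes back into an Event; a sketch assuming decodeMessage from Data.ProtoLens:

    import Data.ByteString (ByteString)
    import Data.ProtoLens (decodeMessage)
    import qualified Data.Text as T

    readEvent :: ByteString -> Either String Event
    readEvent = decodeMessage

    -- Dispatch on an optional payload via its maybe' lens.
    describeEvent :: Event -> String
    describeEvent ev = case ev ^. maybe'logMessage of
        Just lm -> "log: " ++ T.unpack (lm ^. message)
        Nothing -> "event at step " ++ show (ev ^. step)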

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html index 141d60d..950a75a 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-95.html @@ -1,4 +1,4 @@ tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - _)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - _

    _AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
    _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
    _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
    _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
    _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
    _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
    _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
    _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
    _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
    _OpList'opProto.Tensorflow.Core.Framework.OpDef
    _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
    _ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
    _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
    _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
    _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
    _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
    \ No newline at end of file +

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - _

    _AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
    _Event'fileVersionProto.Tensorflow.Core.Util.Event
    _Event'graphDefProto.Tensorflow.Core.Util.Event
    _Event'logMessageProto.Tensorflow.Core.Util.Event
    _Event'metaGraphDefProto.Tensorflow.Core.Util.Event
    _Event'sessionLogProto.Tensorflow.Core.Util.Event
    _Event'stepProto.Tensorflow.Core.Util.Event
    _Event'summaryProto.Tensorflow.Core.Util.Event
    _Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
    _Event'wallTimeProto.Tensorflow.Core.Util.Event
    _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
    _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
    _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
    _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
    _HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'minProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'numProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
    _LogMessage'levelProto.Tensorflow.Core.Util.Event
    _LogMessage'messageProto.Tensorflow.Core.Util.Event
    _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
    _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
    _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
    _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
    _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
    _OpList'opProto.Tensorflow.Core.Framework.OpDef
    _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
    _ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
    _RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
    _SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
    _SessionLog'msgProto.Tensorflow.Core.Util.Event
    _SessionLog'statusProto.Tensorflow.Core.Util.Event
    _Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
    _Summary'valueProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
    _SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
    _TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
    _TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
    _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
    _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
    _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html index ee12573..2a1357e 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-A.html @@ -1,4 +1,4 @@ tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - A)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file +

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html index 2fa311b..8523ff5 100644 --- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html +++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-All.html @@ -1,4 +1,4 @@ tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index

    allocatorTypeProto.Tensorflow.Core.Protobuf.Config
    allowedValuesProto.Tensorflow.Core.Framework.OpDef
    allowGrowthProto.Tensorflow.Core.Protobuf.Config
    allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
    allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
    attr 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    3 (Function)Proto.Tensorflow.Core.Framework.OpDef
    AttrValue 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    AttrValue'ListValue 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    bProto.Tensorflow.Core.Framework.AttrValue
    boolValProto.Tensorflow.Core.Framework.Tensor
    buildCostModelProto.Tensorflow.Core.Protobuf.Config
    buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
    ConfigProto 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    ConfigProto'DeviceCountEntry 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    containerProto.Tensorflow.Core.Framework.ResourceHandle
    costGraphProto.Tensorflow.Core.Protobuf.Config
    DataTypeProto.Tensorflow.Core.Framework.Types
    dcomplexValProto.Tensorflow.Core.Framework.Tensor
    debugOpsProto.Tensorflow.Core.Protobuf.Config
    DebugTensorWatch 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
    debugUrlsProto.Tensorflow.Core.Protobuf.Config
    defaultValueProto.Tensorflow.Core.Framework.OpDef
    deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    deprecationProto.Tensorflow.Core.Framework.OpDef
    descriptionProto.Tensorflow.Core.Framework.OpDef
    device 
    1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    deviceCountProto.Tensorflow.Core.Protobuf.Config
    deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    dimProto.Tensorflow.Core.Framework.TensorShape
    doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    doubleValProto.Tensorflow.Core.Framework.Tensor
    dtypeProto.Tensorflow.Core.Framework.Tensor
    DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
    DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
    DT_BOOLProto.Tensorflow.Core.Framework.Types
    DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
    DT_DOUBLEProto.Tensorflow.Core.Framework.Types
    DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
    DT_FLOATProto.Tensorflow.Core.Framework.Types
    DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
    DT_HALFProto.Tensorflow.Core.Framework.Types
    DT_HALF_REFProto.Tensorflow.Core.Framework.Types
    DT_INT16Proto.Tensorflow.Core.Framework.Types
    DT_INT16_REFProto.Tensorflow.Core.Framework.Types
    DT_INT32Proto.Tensorflow.Core.Framework.Types
    DT_INT32_REFProto.Tensorflow.Core.Framework.Types
    DT_INT64Proto.Tensorflow.Core.Framework.Types
    DT_INT64_REFProto.Tensorflow.Core.Framework.Types
    DT_INT8Proto.Tensorflow.Core.Framework.Types
    DT_INT8_REFProto.Tensorflow.Core.Framework.Types
    DT_INVALIDProto.Tensorflow.Core.Framework.Types
    DT_QINT16Proto.Tensorflow.Core.Framework.Types
    DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT32Proto.Tensorflow.Core.Framework.Types
    DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT8Proto.Tensorflow.Core.Framework.Types
    DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT16Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT8Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_RESOURCEProto.Tensorflow.Core.Framework.Types
    DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
    DT_STRINGProto.Tensorflow.Core.Framework.Types
    DT_STRING_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT16Proto.Tensorflow.Core.Framework.Types
    DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT8Proto.Tensorflow.Core.Framework.Types
    DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
    enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
    enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
    explanationProto.Tensorflow.Core.Framework.OpDef
    fProto.Tensorflow.Core.Framework.AttrValue
    floatValProto.Tensorflow.Core.Framework.Tensor
    funcProto.Tensorflow.Core.Framework.AttrValue
    GPUOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    GraphDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
    GraphOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    graphOptionsProto.Tensorflow.Core.Protobuf.Config
    halfValProto.Tensorflow.Core.Framework.Tensor
    hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
    hasMinimumProto.Tensorflow.Core.Framework.OpDef
    iProto.Tensorflow.Core.Framework.AttrValue
    inferShapesProto.Tensorflow.Core.Protobuf.Config
    inputProto.Tensorflow.Core.Framework.NodeDef
    inputArgProto.Tensorflow.Core.Framework.OpDef
    int64ValProto.Tensorflow.Core.Framework.Tensor
    interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    intValProto.Tensorflow.Core.Framework.Tensor
    isAggregateProto.Tensorflow.Core.Framework.OpDef
    isCommutativeProto.Tensorflow.Core.Framework.OpDef
    isRefProto.Tensorflow.Core.Framework.OpDef
    isStatefulProto.Tensorflow.Core.Framework.OpDef
    key 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    3 (Function)Proto.Tensorflow.Core.Protobuf.Config
    libraryProto.Tensorflow.Core.Framework.Graph
    listProto.Tensorflow.Core.Framework.AttrValue
    logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
    maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
    maybe'bProto.Tensorflow.Core.Framework.AttrValue
    maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
    maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
    maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
    maybe'fProto.Tensorflow.Core.Framework.AttrValue
    maybe'funcProto.Tensorflow.Core.Framework.AttrValue
    maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'iProto.Tensorflow.Core.Framework.AttrValue
    maybe'libraryProto.Tensorflow.Core.Framework.Graph
    maybe'listProto.Tensorflow.Core.Framework.AttrValue
    maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
    maybe'sProto.Tensorflow.Core.Framework.AttrValue
    maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
    maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
    maybe'tensorProto.Tensorflow.Core.Framework.AttrValue
    maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
    maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
    maybe'value 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    maybe'versionsProto.Tensorflow.Core.Framework.Graph
    maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
    minimumProto.Tensorflow.Core.Framework.OpDef
    name 
    1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Function)Proto.Tensorflow.Core.Framework.TensorShape
    3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    4 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    5 (Function)Proto.Tensorflow.Core.Framework.OpDef
    NameAttrList 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    NameAttrList'AttrEntry 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    nodeProto.Tensorflow.Core.Framework.Graph
    NodeDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
    NodeDef'AttrEntry 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
    nodeNameProto.Tensorflow.Core.Protobuf.Config
    numberAttrProto.Tensorflow.Core.Framework.OpDef
    numThreadsProto.Tensorflow.Core.Protobuf.Config
    op 
    1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    2 (Function)Proto.Tensorflow.Core.Framework.OpDef
    OpDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDef'ArgDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDef'AttrDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDeprecation 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
    OpList 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OptimizerOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
    optLevelProto.Tensorflow.Core.Protobuf.Config
    outputArgProto.Tensorflow.Core.Framework.OpDef
    outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
    outputSlotProto.Tensorflow.Core.Protobuf.Config
    partitionGraphsProto.Tensorflow.Core.Protobuf.Config
    perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
    placeholderProto.Tensorflow.Core.Framework.AttrValue
    placementPeriodProto.Tensorflow.Core.Protobuf.Config
    placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
    ResourceHandle 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
    resourceHandleValProto.Tensorflow.Core.Framework.Tensor
    RunMetadata 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    RunOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
    sProto.Tensorflow.Core.Framework.AttrValue
    scomplexValProto.Tensorflow.Core.Framework.Tensor
    sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    shapeProto.Tensorflow.Core.Framework.AttrValue
    sizeProto.Tensorflow.Core.Framework.TensorShape
    stepStatsProto.Tensorflow.Core.Protobuf.Config
    stringValProto.Tensorflow.Core.Framework.Tensor
    summaryProto.Tensorflow.Core.Framework.OpDef
    tensorProto.Tensorflow.Core.Framework.AttrValue
    tensorContentProto.Tensorflow.Core.Framework.Tensor
    TensorProto 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
    tensorShapeProto.Tensorflow.Core.Framework.Tensor
    TensorShapeProto 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
    TensorShapeProto'Dim 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
    ThreadPoolOptionProto 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    timelineStepProto.Tensorflow.Core.Protobuf.Config
    timeoutInMsProto.Tensorflow.Core.Protobuf.Config
    traceLevelProto.Tensorflow.Core.Protobuf.Config
    type' 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.OpDef
    typeAttrProto.Tensorflow.Core.Framework.OpDef
    typeListAttrProto.Tensorflow.Core.Framework.OpDef
    unknownRankProto.Tensorflow.Core.Framework.TensorShape
    usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
    value 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    3 (Function)Proto.Tensorflow.Core.Protobuf.Config
    version 
    1 (Function)Proto.Tensorflow.Core.Framework.OpDef
    2 (Function)Proto.Tensorflow.Core.Framework.Graph
    versionNumberProto.Tensorflow.Core.Framework.Tensor
    versionsProto.Tensorflow.Core.Framework.Graph
    visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
    _AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugOpsProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'debugUrlsProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'nodeNameProto.Tensorflow.Core.Protobuf.Config
    _DebugTensorWatch'outputSlotProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
    _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
    _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
    _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
    _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
    _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
    _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
    _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
    _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
    _OpList'opProto.Tensorflow.Core.Framework.OpDef
    _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
    _ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
    _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
    _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
    _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
    _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index

    allocatorTypeProto.Tensorflow.Core.Protobuf.Config
    allowedValuesProto.Tensorflow.Core.Framework.OpDef
    allowGrowthProto.Tensorflow.Core.Protobuf.Config
    allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
    allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
    attr 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    3 (Function)Proto.Tensorflow.Core.Framework.OpDef
    AttrValue 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    AttrValue'ListValue 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    audioProto.Tensorflow.Core.Framework.Summary
    bProto.Tensorflow.Core.Framework.AttrValue
    boolValProto.Tensorflow.Core.Framework.Tensor
    bucketProto.Tensorflow.Core.Framework.Summary
    bucketLimitProto.Tensorflow.Core.Framework.Summary
    buildCostModelProto.Tensorflow.Core.Protobuf.Config
    buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
    checkpointPathProto.Tensorflow.Core.Util.Event
    colorspaceProto.Tensorflow.Core.Framework.Summary
    ConfigProto 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    ConfigProto'DeviceCountEntry 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    containerProto.Tensorflow.Core.Framework.ResourceHandle
    contentTypeProto.Tensorflow.Core.Framework.Summary
    costGraphProto.Tensorflow.Core.Protobuf.Config
    DataTypeProto.Tensorflow.Core.Framework.Types
    dcomplexValProto.Tensorflow.Core.Framework.Tensor
    debugOptionsProto.Tensorflow.Core.Protobuf.Config
    defaultValueProto.Tensorflow.Core.Framework.OpDef
    deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    deprecationProto.Tensorflow.Core.Framework.OpDef
    descriptionProto.Tensorflow.Core.Framework.OpDef
    device 
    1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    deviceCountProto.Tensorflow.Core.Protobuf.Config
    deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    dimProto.Tensorflow.Core.Framework.TensorShape
    doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    doubleValProto.Tensorflow.Core.Framework.Tensor
    dtypeProto.Tensorflow.Core.Framework.Tensor
    DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
    DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
    DT_BOOLProto.Tensorflow.Core.Framework.Types
    DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
    DT_DOUBLEProto.Tensorflow.Core.Framework.Types
    DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
    DT_FLOATProto.Tensorflow.Core.Framework.Types
    DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
    DT_HALFProto.Tensorflow.Core.Framework.Types
    DT_HALF_REFProto.Tensorflow.Core.Framework.Types
    DT_INT16Proto.Tensorflow.Core.Framework.Types
    DT_INT16_REFProto.Tensorflow.Core.Framework.Types
    DT_INT32Proto.Tensorflow.Core.Framework.Types
    DT_INT32_REFProto.Tensorflow.Core.Framework.Types
    DT_INT64Proto.Tensorflow.Core.Framework.Types
    DT_INT64_REFProto.Tensorflow.Core.Framework.Types
    DT_INT8Proto.Tensorflow.Core.Framework.Types
    DT_INT8_REFProto.Tensorflow.Core.Framework.Types
    DT_INVALIDProto.Tensorflow.Core.Framework.Types
    DT_QINT16Proto.Tensorflow.Core.Framework.Types
    DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT32Proto.Tensorflow.Core.Framework.Types
    DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT8Proto.Tensorflow.Core.Framework.Types
    DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT16Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT8Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_RESOURCEProto.Tensorflow.Core.Framework.Types
    DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
    DT_STRINGProto.Tensorflow.Core.Framework.Types
    DT_STRING_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT16Proto.Tensorflow.Core.Framework.Types
    DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT8Proto.Tensorflow.Core.Framework.Types
    DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
    enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
    enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
    encodedAudioStringProto.Tensorflow.Core.Framework.Summary
    encodedImageStringProto.Tensorflow.Core.Framework.Summary
    Event 
    1 (Type/Class)Proto.Tensorflow.Core.Util.Event
    2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
    explanationProto.Tensorflow.Core.Framework.OpDef
    fProto.Tensorflow.Core.Framework.AttrValue
    fileVersionProto.Tensorflow.Core.Util.Event
    floatValProto.Tensorflow.Core.Framework.Tensor
    funcProto.Tensorflow.Core.Framework.AttrValue
    globalJitLevelProto.Tensorflow.Core.Protobuf.Config
    GPUOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    GraphDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Graph
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Graph
    graphDefProto.Tensorflow.Core.Util.Event
    GraphOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    graphOptionsProto.Tensorflow.Core.Protobuf.Config
    halfValProto.Tensorflow.Core.Framework.Tensor
    hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
    hasMinimumProto.Tensorflow.Core.Framework.OpDef
    heightProto.Tensorflow.Core.Framework.Summary
    histoProto.Tensorflow.Core.Framework.Summary
    HistogramProto 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
    iProto.Tensorflow.Core.Framework.AttrValue
    imageProto.Tensorflow.Core.Framework.Summary
    inferShapesProto.Tensorflow.Core.Protobuf.Config
    inputProto.Tensorflow.Core.Framework.NodeDef
    inputArgProto.Tensorflow.Core.Framework.OpDef
    int64ValProto.Tensorflow.Core.Framework.Tensor
    interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    intValProto.Tensorflow.Core.Framework.Tensor
    isAggregateProto.Tensorflow.Core.Framework.OpDef
    isCommutativeProto.Tensorflow.Core.Framework.OpDef
    isRefProto.Tensorflow.Core.Framework.OpDef
    isStatefulProto.Tensorflow.Core.Framework.OpDef
    key 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    3 (Function)Proto.Tensorflow.Core.Protobuf.Config
    lengthFramesProto.Tensorflow.Core.Framework.Summary
    levelProto.Tensorflow.Core.Util.Event
    libraryProto.Tensorflow.Core.Framework.Graph
    listProto.Tensorflow.Core.Framework.AttrValue
    logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
    LogMessage 
    1 (Type/Class)Proto.Tensorflow.Core.Util.Event
    2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
    logMessageProto.Tensorflow.Core.Util.Event
    LogMessage'DEBUGProto.Tensorflow.Core.Util.Event
    LogMessage'ERRORProto.Tensorflow.Core.Util.Event
    LogMessage'FATALProto.Tensorflow.Core.Util.Event
    LogMessage'INFOProto.Tensorflow.Core.Util.Event
    LogMessage'LevelProto.Tensorflow.Core.Util.Event
    LogMessage'UNKNOWNProto.Tensorflow.Core.Util.Event
    LogMessage'WARNProto.Tensorflow.Core.Util.Event
    maxProto.Tensorflow.Core.Framework.Summary
    maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
    maybe'audioProto.Tensorflow.Core.Framework.Summary
    maybe'bProto.Tensorflow.Core.Framework.AttrValue
    maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
    maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
    maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
    maybe'fProto.Tensorflow.Core.Framework.AttrValue
    maybe'fileVersionProto.Tensorflow.Core.Util.Event
    maybe'funcProto.Tensorflow.Core.Framework.AttrValue
    maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'graphDefProto.Tensorflow.Core.Util.Event
    maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'histoProto.Tensorflow.Core.Framework.Summary
    maybe'iProto.Tensorflow.Core.Framework.AttrValue
    maybe'imageProto.Tensorflow.Core.Framework.Summary
    maybe'libraryProto.Tensorflow.Core.Framework.Graph
    maybe'listProto.Tensorflow.Core.Framework.AttrValue
    maybe'logMessageProto.Tensorflow.Core.Util.Event
    maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
    maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
    maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
    maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'sProto.Tensorflow.Core.Framework.AttrValue
    maybe'sessionLogProto.Tensorflow.Core.Util.Event
    maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
    maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
    maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
    maybe'summaryProto.Tensorflow.Core.Util.Event
    maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
    maybe'tensor 
    1 (Function)Proto.Tensorflow.Core.Framework.Summary
    2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
    maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
    maybe'value 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    maybe'versionsProto.Tensorflow.Core.Framework.Graph
    maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
    messageProto.Tensorflow.Core.Util.Event
    metaGraphDefProto.Tensorflow.Core.Util.Event
    minProto.Tensorflow.Core.Framework.Summary
    minimumProto.Tensorflow.Core.Framework.OpDef
    msgProto.Tensorflow.Core.Util.Event
    name 
    1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Function)Proto.Tensorflow.Core.Framework.TensorShape
    3 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    4 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    5 (Function)Proto.Tensorflow.Core.Framework.OpDef
    NameAttrList 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    NameAttrList'AttrEntry 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.AttrValue
    nodeProto.Tensorflow.Core.Framework.Graph
    NodeDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
    NodeDef'AttrEntry 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.NodeDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.NodeDef
    nodeNameProto.Tensorflow.Core.Framework.Summary
    numProto.Tensorflow.Core.Framework.Summary
    numberAttrProto.Tensorflow.Core.Framework.OpDef
    numChannelsProto.Tensorflow.Core.Framework.Summary
    numThreadsProto.Tensorflow.Core.Protobuf.Config
    obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
    op 
    1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    2 (Function)Proto.Tensorflow.Core.Framework.OpDef
    OpDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDef'ArgDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDef'AttrDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDeprecation 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
    OpList 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OptimizerOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
    optLevelProto.Tensorflow.Core.Protobuf.Config
    outputArgProto.Tensorflow.Core.Framework.OpDef
    outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
    partitionGraphsProto.Tensorflow.Core.Protobuf.Config
    perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
    placeholderProto.Tensorflow.Core.Framework.AttrValue
    placementPeriodProto.Tensorflow.Core.Protobuf.Config
    placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
    ResourceHandle 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.ResourceHandle
    resourceHandleValProto.Tensorflow.Core.Framework.Tensor
    RPCOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    rpcOptionsProto.Tensorflow.Core.Protobuf.Config
    RunMetadata 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    runMetadataProto.Tensorflow.Core.Util.Event
    RunOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    RunOptions'FULL_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'HARDWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'NO_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'SOFTWARE_TRACEProto.Tensorflow.Core.Protobuf.Config
    RunOptions'TraceLevelProto.Tensorflow.Core.Protobuf.Config
    sProto.Tensorflow.Core.Framework.AttrValue
    sampleRateProto.Tensorflow.Core.Framework.Summary
    scomplexValProto.Tensorflow.Core.Framework.Tensor
    sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    SessionLog 
    1 (Type/Class)Proto.Tensorflow.Core.Util.Event
    2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
    sessionLogProto.Tensorflow.Core.Util.Event
    SessionLog'CHECKPOINTProto.Tensorflow.Core.Util.Event
    SessionLog'SessionStatusProto.Tensorflow.Core.Util.Event
    SessionLog'STARTProto.Tensorflow.Core.Util.Event
    SessionLog'STATUS_UNSPECIFIEDProto.Tensorflow.Core.Util.Event
    SessionLog'STOPProto.Tensorflow.Core.Util.Event
    shapeProto.Tensorflow.Core.Framework.AttrValue
    simpleValueProto.Tensorflow.Core.Framework.Summary
    sizeProto.Tensorflow.Core.Framework.TensorShape
    statusProto.Tensorflow.Core.Util.Event
    stepProto.Tensorflow.Core.Util.Event
    stepStatsProto.Tensorflow.Core.Protobuf.Config
    stringValProto.Tensorflow.Core.Framework.Tensor
    sumProto.Tensorflow.Core.Framework.Summary
    Summary 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
    summary 
    1 (Function)Proto.Tensorflow.Core.Util.Event
    2 (Function)Proto.Tensorflow.Core.Framework.OpDef
    Summary'Audio 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
    Summary'Image 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
    Summary'Value 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
    SummaryDescription 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Summary
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Summary
    sumSquaresProto.Tensorflow.Core.Framework.Summary
    tag 
    1 (Function)Proto.Tensorflow.Core.Framework.Summary
    2 (Function)Proto.Tensorflow.Core.Util.Event
    TaggedRunMetadata 
    1 (Type/Class)Proto.Tensorflow.Core.Util.Event
    2 (Data Constructor)Proto.Tensorflow.Core.Util.Event
    taggedRunMetadataProto.Tensorflow.Core.Util.Event
    tensor 
    1 (Function)Proto.Tensorflow.Core.Framework.Summary
    2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    tensorContentProto.Tensorflow.Core.Framework.Tensor
    TensorProto 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.Tensor
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.Tensor
    tensorShapeProto.Tensorflow.Core.Framework.Tensor
    TensorShapeProto 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
    TensorShapeProto'Dim 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.TensorShape
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.TensorShape
    ThreadPoolOptionProto 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    timelineStepProto.Tensorflow.Core.Protobuf.Config
    timeoutInMsProto.Tensorflow.Core.Protobuf.Config
    traceLevelProto.Tensorflow.Core.Protobuf.Config
    type' 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.OpDef
    typeAttrProto.Tensorflow.Core.Framework.OpDef
    typeHintProto.Tensorflow.Core.Framework.Summary
    typeListAttrProto.Tensorflow.Core.Framework.OpDef
    unknownRankProto.Tensorflow.Core.Framework.TensorShape
    usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
    useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
    value 
    1 (Function)Proto.Tensorflow.Core.Framework.Summary
    2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    3 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    4 (Function)Proto.Tensorflow.Core.Protobuf.Config
    version 
    1 (Function)Proto.Tensorflow.Core.Framework.OpDef
    2 (Function)Proto.Tensorflow.Core.Framework.Graph
    versionNumberProto.Tensorflow.Core.Framework.Tensor
    versionsProto.Tensorflow.Core.Framework.Graph
    visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
    wallTimeProto.Tensorflow.Core.Util.Event
    widthProto.Tensorflow.Core.Framework.Summary
    _AttrValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'funcProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'listProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'bProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'fProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'funcProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'iProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'ListValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'placeholderProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'sProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'shapeProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'tensorProto.Tensorflow.Core.Framework.AttrValue
    _AttrValue'type'Proto.Tensorflow.Core.Framework.AttrValue
    _ConfigProto'allowSoftPlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceCountProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'keyProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'DeviceCountEntry'valueProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'graphOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'interOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'intraOpParallelismThreadsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'logDevicePlacementProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'placementPeriodProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'sessionInterOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _ConfigProto'usePerSessionThreadsProto.Tensorflow.Core.Protobuf.Config
    _Event'fileVersionProto.Tensorflow.Core.Util.Event
    _Event'graphDefProto.Tensorflow.Core.Util.Event
    _Event'logMessageProto.Tensorflow.Core.Util.Event
    _Event'metaGraphDefProto.Tensorflow.Core.Util.Event
    _Event'sessionLogProto.Tensorflow.Core.Util.Event
    _Event'stepProto.Tensorflow.Core.Util.Event
    _Event'summaryProto.Tensorflow.Core.Util.Event
    _Event'taggedRunMetadataProto.Tensorflow.Core.Util.Event
    _Event'wallTimeProto.Tensorflow.Core.Util.Event
    _GPUOptions'allocatorTypeProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'allowGrowthProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'perProcessGpuMemoryFractionProto.Tensorflow.Core.Protobuf.Config
    _GPUOptions'visibleDeviceListProto.Tensorflow.Core.Protobuf.Config
    _GraphDef'libraryProto.Tensorflow.Core.Framework.Graph
    _GraphDef'nodeProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionProto.Tensorflow.Core.Framework.Graph
    _GraphDef'versionsProto.Tensorflow.Core.Framework.Graph
    _GraphOptions'buildCostModelProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'buildCostModelAfterProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableBfloat16SendrecvProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'enableRecvSchedulingProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'inferShapesProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'placePrunedGraphProto.Tensorflow.Core.Protobuf.Config
    _GraphOptions'timelineStepProto.Tensorflow.Core.Protobuf.Config
    _HistogramProto'bucketProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'bucketLimitProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'maxProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'minProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'numProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'sumProto.Tensorflow.Core.Framework.Summary
    _HistogramProto'sumSquaresProto.Tensorflow.Core.Framework.Summary
    _LogMessage'levelProto.Tensorflow.Core.Util.Event
    _LogMessage'messageProto.Tensorflow.Core.Util.Event
    _NameAttrList'attrProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'keyProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'AttrEntry'valueProto.Tensorflow.Core.Framework.AttrValue
    _NameAttrList'nameProto.Tensorflow.Core.Framework.AttrValue
    _NodeDef'attrProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'keyProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'AttrEntry'valueProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'deviceProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'inputProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'nameProto.Tensorflow.Core.Framework.NodeDef
    _NodeDef'opProto.Tensorflow.Core.Framework.NodeDef
    _OpDef'allowsUninitializedInputProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'isRefProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'numberAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'ArgDef'typeListAttrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'attrProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'allowedValuesProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'defaultValueProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'hasMinimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'minimumProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'AttrDef'type'Proto.Tensorflow.Core.Framework.OpDef
    _OpDef'deprecationProto.Tensorflow.Core.Framework.OpDef
    _OpDef'descriptionProto.Tensorflow.Core.Framework.OpDef
    _OpDef'inputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isAggregateProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isCommutativeProto.Tensorflow.Core.Framework.OpDef
    _OpDef'isStatefulProto.Tensorflow.Core.Framework.OpDef
    _OpDef'nameProto.Tensorflow.Core.Framework.OpDef
    _OpDef'outputArgProto.Tensorflow.Core.Framework.OpDef
    _OpDef'summaryProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'explanationProto.Tensorflow.Core.Framework.OpDef
    _OpDeprecation'versionProto.Tensorflow.Core.Framework.OpDef
    _OpList'opProto.Tensorflow.Core.Framework.OpDef
    _OptimizerOptions'doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'globalJitLevelProto.Tensorflow.Core.Protobuf.Config
    _OptimizerOptions'optLevelProto.Tensorflow.Core.Protobuf.Config
    _ResourceHandle'containerProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'deviceProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'hashCodeProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
    _ResourceHandle'nameProto.Tensorflow.Core.Framework.ResourceHandle
    _RPCOptions'useRpcForInprocessMasterProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'costGraphProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'partitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunMetadata'stepStatsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'debugOptionsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'interOpThreadPoolProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'timeoutInMsProto.Tensorflow.Core.Protobuf.Config
    _RunOptions'traceLevelProto.Tensorflow.Core.Protobuf.Config
    _SessionLog'checkpointPathProto.Tensorflow.Core.Util.Event
    _SessionLog'msgProto.Tensorflow.Core.Util.Event
    _SessionLog'statusProto.Tensorflow.Core.Util.Event
    _Summary'Audio'contentTypeProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'encodedAudioStringProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'lengthFramesProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'numChannelsProto.Tensorflow.Core.Framework.Summary
    _Summary'Audio'sampleRateProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'colorspaceProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'encodedImageStringProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'heightProto.Tensorflow.Core.Framework.Summary
    _Summary'Image'widthProto.Tensorflow.Core.Framework.Summary
    _Summary'valueProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'audioProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'histoProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'imageProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'nodeNameProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'simpleValueProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'tagProto.Tensorflow.Core.Framework.Summary
    _Summary'Value'tensorProto.Tensorflow.Core.Framework.Summary
    _SummaryDescription'typeHintProto.Tensorflow.Core.Framework.Summary
    _TaggedRunMetadata'runMetadataProto.Tensorflow.Core.Util.Event
    _TaggedRunMetadata'tagProto.Tensorflow.Core.Util.Event
    _TensorProto'boolValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dcomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'doubleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'dtypeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'floatValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'halfValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'int64ValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'intValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'resourceHandleValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'scomplexValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'stringValProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorContentProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'tensorShapeProto.Tensorflow.Core.Framework.Tensor
    _TensorProto'versionNumberProto.Tensorflow.Core.Framework.Tensor
    _TensorShapeProto'dimProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'nameProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'Dim'sizeProto.Tensorflow.Core.Framework.TensorShape
    _TensorShapeProto'unknownRankProto.Tensorflow.Core.Framework.TensorShape
    _ThreadPoolOptionProto'numThreadsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html
index ae55437..d24203f 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-B.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - B)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html
index f072b65..fda7550 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-C.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - C)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html
index 9d5266e..ab1367b 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-D.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - D)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - D

    DataTypeProto.Tensorflow.Core.Framework.Types
    dcomplexValProto.Tensorflow.Core.Framework.Tensor
    debugOpsProto.Tensorflow.Core.Protobuf.Config
    DebugTensorWatch 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    debugTensorWatchOptsProto.Tensorflow.Core.Protobuf.Config
    debugUrlsProto.Tensorflow.Core.Protobuf.Config
    defaultValueProto.Tensorflow.Core.Framework.OpDef
    deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    deprecationProto.Tensorflow.Core.Framework.OpDef
    descriptionProto.Tensorflow.Core.Framework.OpDef
    device 
    1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    deviceCountProto.Tensorflow.Core.Protobuf.Config
    deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    dimProto.Tensorflow.Core.Framework.TensorShape
    doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    doubleValProto.Tensorflow.Core.Framework.Tensor
    dtypeProto.Tensorflow.Core.Framework.Tensor
    DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
    DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
    DT_BOOLProto.Tensorflow.Core.Framework.Types
    DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
    DT_DOUBLEProto.Tensorflow.Core.Framework.Types
    DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
    DT_FLOATProto.Tensorflow.Core.Framework.Types
    DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
    DT_HALFProto.Tensorflow.Core.Framework.Types
    DT_HALF_REFProto.Tensorflow.Core.Framework.Types
    DT_INT16Proto.Tensorflow.Core.Framework.Types
    DT_INT16_REFProto.Tensorflow.Core.Framework.Types
    DT_INT32Proto.Tensorflow.Core.Framework.Types
    DT_INT32_REFProto.Tensorflow.Core.Framework.Types
    DT_INT64Proto.Tensorflow.Core.Framework.Types
    DT_INT64_REFProto.Tensorflow.Core.Framework.Types
    DT_INT8Proto.Tensorflow.Core.Framework.Types
    DT_INT8_REFProto.Tensorflow.Core.Framework.Types
    DT_INVALIDProto.Tensorflow.Core.Framework.Types
    DT_QINT16Proto.Tensorflow.Core.Framework.Types
    DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT32Proto.Tensorflow.Core.Framework.Types
    DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT8Proto.Tensorflow.Core.Framework.Types
    DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT16Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT8Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_RESOURCEProto.Tensorflow.Core.Framework.Types
    DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
    DT_STRINGProto.Tensorflow.Core.Framework.Types
    DT_STRING_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT16Proto.Tensorflow.Core.Framework.Types
    DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT8Proto.Tensorflow.Core.Framework.Types
    DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - D

    DataTypeProto.Tensorflow.Core.Framework.Types
    dcomplexValProto.Tensorflow.Core.Framework.Tensor
    debugOptionsProto.Tensorflow.Core.Protobuf.Config
    defaultValueProto.Tensorflow.Core.Framework.OpDef
    deferredDeletionBytesProto.Tensorflow.Core.Protobuf.Config
    deprecationProto.Tensorflow.Core.Framework.OpDef
    descriptionProto.Tensorflow.Core.Framework.OpDef
    device 
    1 (Function)Proto.Tensorflow.Core.Framework.ResourceHandle
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    deviceCountProto.Tensorflow.Core.Protobuf.Config
    deviceFiltersProto.Tensorflow.Core.Protobuf.Config
    dimProto.Tensorflow.Core.Framework.TensorShape
    doCommonSubexpressionEliminationProto.Tensorflow.Core.Protobuf.Config
    doConstantFoldingProto.Tensorflow.Core.Protobuf.Config
    doFunctionInliningProto.Tensorflow.Core.Protobuf.Config
    doubleValProto.Tensorflow.Core.Framework.Tensor
    dtypeProto.Tensorflow.Core.Framework.Tensor
    DT_BFLOAT16Proto.Tensorflow.Core.Framework.Types
    DT_BFLOAT16_REFProto.Tensorflow.Core.Framework.Types
    DT_BOOLProto.Tensorflow.Core.Framework.Types
    DT_BOOL_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX128_REFProto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64Proto.Tensorflow.Core.Framework.Types
    DT_COMPLEX64_REFProto.Tensorflow.Core.Framework.Types
    DT_DOUBLEProto.Tensorflow.Core.Framework.Types
    DT_DOUBLE_REFProto.Tensorflow.Core.Framework.Types
    DT_FLOATProto.Tensorflow.Core.Framework.Types
    DT_FLOAT_REFProto.Tensorflow.Core.Framework.Types
    DT_HALFProto.Tensorflow.Core.Framework.Types
    DT_HALF_REFProto.Tensorflow.Core.Framework.Types
    DT_INT16Proto.Tensorflow.Core.Framework.Types
    DT_INT16_REFProto.Tensorflow.Core.Framework.Types
    DT_INT32Proto.Tensorflow.Core.Framework.Types
    DT_INT32_REFProto.Tensorflow.Core.Framework.Types
    DT_INT64Proto.Tensorflow.Core.Framework.Types
    DT_INT64_REFProto.Tensorflow.Core.Framework.Types
    DT_INT8Proto.Tensorflow.Core.Framework.Types
    DT_INT8_REFProto.Tensorflow.Core.Framework.Types
    DT_INVALIDProto.Tensorflow.Core.Framework.Types
    DT_QINT16Proto.Tensorflow.Core.Framework.Types
    DT_QINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT32Proto.Tensorflow.Core.Framework.Types
    DT_QINT32_REFProto.Tensorflow.Core.Framework.Types
    DT_QINT8Proto.Tensorflow.Core.Framework.Types
    DT_QINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT16Proto.Tensorflow.Core.Framework.Types
    DT_QUINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_QUINT8Proto.Tensorflow.Core.Framework.Types
    DT_QUINT8_REFProto.Tensorflow.Core.Framework.Types
    DT_RESOURCEProto.Tensorflow.Core.Framework.Types
    DT_RESOURCE_REFProto.Tensorflow.Core.Framework.Types
    DT_STRINGProto.Tensorflow.Core.Framework.Types
    DT_STRING_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT16Proto.Tensorflow.Core.Framework.Types
    DT_UINT16_REFProto.Tensorflow.Core.Framework.Types
    DT_UINT8Proto.Tensorflow.Core.Framework.Types
    DT_UINT8_REFProto.Tensorflow.Core.Framework.Types
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html
index 75c8281..09068b6 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-E.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - E)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html
index 38b9899..6c182e3 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-F.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - F)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html
index 48b269f..33f0067 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-G.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - G)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html
index e71b627..44e25df 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-H.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - H)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html
index a29b263..f918573 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-I.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - I)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html
index 8a70b07..457c499 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-K.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - K)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html
index e9e6c5c..69c144f 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-L.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - L)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html
index 3e02ce7..1ca17c2 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-M.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - M)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - M

    maxProto.Tensorflow.Core.Framework.Summary
    maybe'allowedValuesProto.Tensorflow.Core.Framework.OpDef
    maybe'audioProto.Tensorflow.Core.Framework.Summary
    maybe'bProto.Tensorflow.Core.Framework.AttrValue
    maybe'costGraphProto.Tensorflow.Core.Protobuf.Config
    maybe'debugOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'defaultValueProto.Tensorflow.Core.Framework.OpDef
    maybe'deprecationProto.Tensorflow.Core.Framework.OpDef
    maybe'fProto.Tensorflow.Core.Framework.AttrValue
    maybe'fileVersionProto.Tensorflow.Core.Util.Event
    maybe'funcProto.Tensorflow.Core.Framework.AttrValue
    maybe'gpuOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'graphDefProto.Tensorflow.Core.Util.Event
    maybe'graphOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'histoProto.Tensorflow.Core.Framework.Summary
    maybe'iProto.Tensorflow.Core.Framework.AttrValue
    maybe'imageProto.Tensorflow.Core.Framework.Summary
    maybe'libraryProto.Tensorflow.Core.Framework.Graph
    maybe'listProto.Tensorflow.Core.Framework.AttrValue
    maybe'logMessageProto.Tensorflow.Core.Util.Event
    maybe'metaGraphDefProto.Tensorflow.Core.Util.Event
    maybe'obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
    maybe'optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'placeholderProto.Tensorflow.Core.Framework.AttrValue
    maybe'rpcOptionsProto.Tensorflow.Core.Protobuf.Config
    maybe'sProto.Tensorflow.Core.Framework.AttrValue
    maybe'sessionLogProto.Tensorflow.Core.Util.Event
    maybe'shapeProto.Tensorflow.Core.Framework.AttrValue
    maybe'simpleValueProto.Tensorflow.Core.Framework.Summary
    maybe'stepStatsProto.Tensorflow.Core.Protobuf.Config
    maybe'summaryProto.Tensorflow.Core.Util.Event
    maybe'taggedRunMetadataProto.Tensorflow.Core.Util.Event
    maybe'tensor 
    1 (Function)Proto.Tensorflow.Core.Framework.Summary
    2 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    maybe'tensorShapeProto.Tensorflow.Core.Framework.Tensor
    maybe'type'Proto.Tensorflow.Core.Framework.AttrValue
    maybe'value 
    1 (Function)Proto.Tensorflow.Core.Framework.AttrValue
    2 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    maybe'versionsProto.Tensorflow.Core.Framework.Graph
    maybeTypeNameProto.Tensorflow.Core.Framework.ResourceHandle
    messageProto.Tensorflow.Core.Util.Event
    metaGraphDefProto.Tensorflow.Core.Util.Event
    minProto.Tensorflow.Core.Framework.Summary
    minimumProto.Tensorflow.Core.Framework.OpDef
    msgProto.Tensorflow.Core.Util.Event
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html
index 89af93f..337139c 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-N.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - N)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html
index 417b5cd..950fd48 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-O.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - O)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - O

    obsoleteOldStyleHistogramProto.Tensorflow.Core.Framework.Summary
    op 
    1 (Function)Proto.Tensorflow.Core.Framework.NodeDef
    2 (Function)Proto.Tensorflow.Core.Framework.OpDef
    OpDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDef'ArgDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDef'AttrDef 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OpDeprecation 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    operationTimeoutInMsProto.Tensorflow.Core.Protobuf.Config
    OpList 
    1 (Type/Class)Proto.Tensorflow.Core.Framework.OpDef
    2 (Data Constructor)Proto.Tensorflow.Core.Framework.OpDef
    OptimizerOptions 
    1 (Type/Class)Proto.Tensorflow.Core.Protobuf.Config
    2 (Data Constructor)Proto.Tensorflow.Core.Protobuf.Config
    optimizerOptionsProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'DEFAULTProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'GlobalJitLevelProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L0Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'L1Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'LevelProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'OFFProto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'ON_1Proto.Tensorflow.Core.Protobuf.Config
    OptimizerOptions'ON_2Proto.Tensorflow.Core.Protobuf.Config
    optLevelProto.Tensorflow.Core.Protobuf.Config
    outputArgProto.Tensorflow.Core.Framework.OpDef
    outputPartitionGraphsProto.Tensorflow.Core.Protobuf.Config
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html
index 27bb76a..c5d282f 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-P.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - P)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html
index 2a83347..dccaa44 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-R.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - R)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html
index cb40582..6cff79d 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-S.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - S)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    Index - S

s  Proto.Tensorflow.Core.Framework.AttrValue
sampleRate  Proto.Tensorflow.Core.Framework.Summary
scomplexVal  Proto.Tensorflow.Core.Framework.Tensor
sessionInterOpThreadPool  Proto.Tensorflow.Core.Protobuf.Config
SessionLog
  1 (Type/Class)  Proto.Tensorflow.Core.Util.Event
  2 (Data Constructor)  Proto.Tensorflow.Core.Util.Event
sessionLog  Proto.Tensorflow.Core.Util.Event
SessionLog'CHECKPOINT  Proto.Tensorflow.Core.Util.Event
SessionLog'SessionStatus  Proto.Tensorflow.Core.Util.Event
SessionLog'START  Proto.Tensorflow.Core.Util.Event
SessionLog'STATUS_UNSPECIFIED  Proto.Tensorflow.Core.Util.Event
SessionLog'STOP  Proto.Tensorflow.Core.Util.Event
shape  Proto.Tensorflow.Core.Framework.AttrValue
simpleValue  Proto.Tensorflow.Core.Framework.Summary
size  Proto.Tensorflow.Core.Framework.TensorShape
status  Proto.Tensorflow.Core.Util.Event
step  Proto.Tensorflow.Core.Util.Event
stepStats  Proto.Tensorflow.Core.Protobuf.Config
stringVal  Proto.Tensorflow.Core.Framework.Tensor
sum  Proto.Tensorflow.Core.Framework.Summary
Summary
  1 (Type/Class)  Proto.Tensorflow.Core.Framework.Summary
  2 (Data Constructor)  Proto.Tensorflow.Core.Framework.Summary
summary
  1 (Function)  Proto.Tensorflow.Core.Util.Event
  2 (Function)  Proto.Tensorflow.Core.Framework.OpDef
Summary'Audio
  1 (Type/Class)  Proto.Tensorflow.Core.Framework.Summary
  2 (Data Constructor)  Proto.Tensorflow.Core.Framework.Summary
Summary'Image
  1 (Type/Class)  Proto.Tensorflow.Core.Framework.Summary
  2 (Data Constructor)  Proto.Tensorflow.Core.Framework.Summary
Summary'Value
  1 (Type/Class)  Proto.Tensorflow.Core.Framework.Summary
  2 (Data Constructor)  Proto.Tensorflow.Core.Framework.Summary
SummaryDescription
  1 (Type/Class)  Proto.Tensorflow.Core.Framework.Summary
  2 (Data Constructor)  Proto.Tensorflow.Core.Framework.Summary
sumSquares  Proto.Tensorflow.Core.Framework.Summary
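Most of the S entries resolve to lens accessors generated by proto-lens. As a minimal sketch of how they compose (assuming lens-family's Lens.Family2 operators and def from data-default-class, which proto-lens re-exports; SessionLog and its status field come from tensorflow/core/util/event.proto):

import Data.Default.Class (def)
import Data.Function ((&))
import Lens.Family2 ((.~), (^.))
import Proto.Tensorflow.Core.Util.Event

-- Build a SessionLog marking session start by overriding the
-- status field of the all-defaults message.
startLog :: SessionLog
startLog = def & status .~ SessionLog'START

main :: IO ()
main = print (startLog ^. status)  -- SessionLog'START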
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html
index ac03d34..9984df7 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-T.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - T)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html
index c48aaf7..e03d8c5 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-U.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - U)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html
index 1f9d2fe..bbd3137 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-V.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - V)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-W.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-W.html
new file mode 100644
index 0000000..54d8f04
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index-W.html
@@ -0,0 +1,4 @@
+tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index - W)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html
index a016a72..b26c50a 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/doc-index.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers. (Index)

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html
index a3a63f9..84031bb 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/index-frames.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/index.html b/docs/haddock/tensorflow-proto-0.1.0.0/index.html
index fb07a5b..dccea21 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/index.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/index.html
@@ -1,4 +1,4 @@
tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
+

    tensorflow-proto-0.1.0.0: TensorFlow protocol buffers.

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Summary.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Summary.html
new file mode 100644
index 0000000..5351bef
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Framework-Summary.html
@@ -0,0 +1,4 @@
+Proto.Tensorflow.Core.Framework.Summary

    Proto.Tensorflow.Core.Framework.Summary

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html
index 6854d28..4626817 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Protobuf-Config.html
@@ -1,4 +1,4 @@
Proto.Tensorflow.Core.Protobuf.Config

    Proto.Tensorflow.Core.Protobuf.Config

\ No newline at end of file
+

    Proto.Tensorflow.Core.Protobuf.Config

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-Event.html b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-Event.html
new file mode 100644
index 0000000..4af986d
--- /dev/null
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/mini_Proto-Tensorflow-Core-Util-Event.html
@@ -0,0 +1,4 @@
+Proto.Tensorflow.Core.Util.Event

    Proto.Tensorflow.Core.Util.Event

\ No newline at end of file
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AllocationDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AllocationDescription.html
deleted file mode 100644
index 8cbaaea..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AllocationDescription.html
+++ /dev/null
@@ -1,233 +0,0 @@
-
-
-
-
-
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/AllocationDescription.hs
-
-
-
-{- This file was auto-generated from tensorflow/core/framework/allocation_description.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.AllocationDescription where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -
    -data AllocationDescription = AllocationDescription{_AllocationDescription'requestedBytes
    -                                                   :: Data.Int.Int64,
    -                                                   _AllocationDescription'allocatedBytes ::
    -                                                   Data.Int.Int64,
    -                                                   _AllocationDescription'allocatorName ::
    -                                                   Data.Text.Text,
    -                                                   _AllocationDescription'allocationId ::
    -                                                   Data.Int.Int64,
    -                                                   _AllocationDescription'hasSingleReference ::
    -                                                   Prelude.Bool,
    -                                                   _AllocationDescription'ptr :: Data.Word.Word64}
    -                           deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "requestedBytes" AllocationDescription =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "requestedBytes"
    -         AllocationDescription AllocationDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocationDescription'requestedBytes
    -              (\ x__ y__ -> x__{_AllocationDescription'requestedBytes = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "allocatedBytes" AllocationDescription =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "allocatedBytes"
    -         AllocationDescription AllocationDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocationDescription'allocatedBytes
    -              (\ x__ y__ -> x__{_AllocationDescription'allocatedBytes = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "allocatorName" AllocationDescription =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "allocatorName"
    -         AllocationDescription AllocationDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocationDescription'allocatorName
    -              (\ x__ y__ -> x__{_AllocationDescription'allocatorName = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "allocationId" AllocationDescription =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "allocationId"
    -         AllocationDescription AllocationDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocationDescription'allocationId
    -              (\ x__ y__ -> x__{_AllocationDescription'allocationId = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "hasSingleReference" AllocationDescription =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "hasSingleReference"
    -         AllocationDescription AllocationDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _AllocationDescription'hasSingleReference
    -              (\ x__ y__ -> x__{_AllocationDescription'hasSingleReference = y__})
    -
    -type instance Data.ProtoLens.Field "ptr" AllocationDescription =
    -     Data.Word.Word64
    -
    -instance Data.ProtoLens.HasField "ptr" AllocationDescription
    -         AllocationDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocationDescription'ptr
    -              (\ x__ y__ -> x__{_AllocationDescription'ptr = y__})
    -
    -instance Data.Default.Class.Default AllocationDescription where
    -        def
    -          = AllocationDescription{_AllocationDescription'requestedBytes =
    -                                    Data.ProtoLens.fieldDefault,
    -                                  _AllocationDescription'allocatedBytes =
    -                                    Data.ProtoLens.fieldDefault,
    -                                  _AllocationDescription'allocatorName =
    -                                    Data.ProtoLens.fieldDefault,
    -                                  _AllocationDescription'allocationId = Data.ProtoLens.fieldDefault,
    -                                  _AllocationDescription'hasSingleReference =
    -                                    Data.ProtoLens.fieldDefault,
    -                                  _AllocationDescription'ptr = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message AllocationDescription where
    -        descriptor
    -          = let requestedBytes__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "requested_bytes"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional requestedBytes)
    -                allocatedBytes__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allocated_bytes"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatedBytes)
    -                allocatorName__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allocator_name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
    -                allocationId__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allocation_id"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocationId)
    -                hasSingleReference__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "has_single_reference"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         hasSingleReference)
    -                ptr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "ptr"
    -                      (Data.ProtoLens.UInt64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional ptr)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, requestedBytes__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, allocatedBytes__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, allocatorName__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, allocationId__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, hasSingleReference__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, ptr__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("requested_bytes", requestedBytes__field_descriptor),
    -                    ("allocated_bytes", allocatedBytes__field_descriptor),
    -                    ("allocator_name", allocatorName__field_descriptor),
    -                    ("allocation_id", allocationId__field_descriptor),
    -                    ("has_single_reference", hasSingleReference__field_descriptor),
    -                    ("ptr", ptr__field_descriptor)])
    -
    -allocatedBytes ::
    -               forall msg msg' .
    -                 Data.ProtoLens.HasField "allocatedBytes" msg msg' =>
    -                 Lens.Family2.Lens msg msg'
    -                   (Data.ProtoLens.Field "allocatedBytes" msg)
    -                   (Data.ProtoLens.Field "allocatedBytes" msg')
    -allocatedBytes
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allocatedBytes")
    -
    -allocationId ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "allocationId" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "allocationId" msg)
    -                 (Data.ProtoLens.Field "allocationId" msg')
    -allocationId
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "allocationId")
    -
    -allocatorName ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "allocatorName" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "allocatorName" msg)
    -                  (Data.ProtoLens.Field "allocatorName" msg')
    -allocatorName
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allocatorName")
    -
    -hasSingleReference ::
    -                   forall msg msg' .
    -                     Data.ProtoLens.HasField "hasSingleReference" msg msg' =>
    -                     Lens.Family2.Lens msg msg'
    -                       (Data.ProtoLens.Field "hasSingleReference" msg)
    -                       (Data.ProtoLens.Field "hasSingleReference" msg')
    -hasSingleReference
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "hasSingleReference")
    -
    -ptr ::
    -    forall msg msg' . Data.ProtoLens.HasField "ptr" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "ptr" msg)
    -        (Data.ProtoLens.Field "ptr" msg')
    -ptr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "ptr")
    -
    -requestedBytes ::
    -               forall msg msg' .
    -                 Data.ProtoLens.HasField "requestedBytes" msg msg' =>
    -                 Lens.Family2.Lens msg msg'
    -                   (Data.ProtoLens.Field "requestedBytes" msg)
    -                   (Data.ProtoLens.Field "requestedBytes" msg')
    -requestedBytes
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "requestedBytes")
    -
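The deleted module above defines one plain lens per field plus a Default instance, so a message is normally built from def with lens updates. A minimal sketch, assuming direct imports from lens-family and data-default-class (the generated module itself only uses their Data.ProtoLens.Reexport wrappers):

{-# LANGUAGE OverloadedStrings #-}

import Data.Default.Class (def)
import Data.Function ((&))
import Lens.Family2 ((.~), (^.))
import Proto.Tensorflow.Core.Framework.AllocationDescription

-- Every field starts at its proto3 zero value (fieldDefault);
-- override only the fields of interest.
desc :: AllocationDescription
desc = def & requestedBytes .~ 1024
           & allocatedBytes .~ 2048
           & allocatorName  .~ "cpu"  -- Data.Text.Text, via OverloadedStrings

main :: IO ()
main = print (desc ^. allocatedBytes)  -- 2048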
-
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AttrValue.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AttrValue.html
deleted file mode 100644
index ca7cc31..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-AttrValue.html
+++ /dev/null
@@ -1,762 +0,0 @@
-
-
-
-
-
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/AttrValue.hs
-
-
-
-{- This file was auto-generated from tensorflow/core/framework/attr_value.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.AttrValue where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.Tensor
    -import qualified Proto.Tensorflow.Core.Framework.TensorShape
    -import qualified Proto.Tensorflow.Core.Framework.Types
    -
    -data AttrValue = AttrValue{_AttrValue's ::
    -                           Prelude.Maybe Data.ByteString.ByteString,
    -                           _AttrValue'i :: Prelude.Maybe Data.Int.Int64,
    -                           _AttrValue'f :: Prelude.Maybe Prelude.Float,
    -                           _AttrValue'b :: Prelude.Maybe Prelude.Bool,
    -                           _AttrValue'type' ::
    -                           Prelude.Maybe Proto.Tensorflow.Core.Framework.Types.DataType,
    -                           _AttrValue'shape ::
    -                           Prelude.Maybe
    -                             Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    -                           _AttrValue'tensor ::
    -                           Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto,
    -                           _AttrValue'list :: Prelude.Maybe AttrValue'ListValue,
    -                           _AttrValue'func :: Prelude.Maybe NameAttrList,
    -                           _AttrValue'placeholder :: Prelude.Maybe Data.Text.Text}
    -               deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "s" AttrValue =
    -     Data.ByteString.ByteString
    -
    -instance Data.ProtoLens.HasField "s" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe's
    -              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    -
    -type instance Data.ProtoLens.Field "maybe's" AttrValue =
    -     Prelude.Maybe Data.ByteString.ByteString
    -
    -instance Data.ProtoLens.HasField "maybe's" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue's
    -              (\ x__ y__ -> x__{_AttrValue's = y__})
    -
    -type instance Data.ProtoLens.Field "i" AttrValue = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "i" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'i
    -              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    -
    -type instance Data.ProtoLens.Field "maybe'i" AttrValue =
    -     Prelude.Maybe Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "maybe'i" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'i
    -              (\ x__ y__ -> x__{_AttrValue'i = y__})
    -
    -type instance Data.ProtoLens.Field "f" AttrValue = Prelude.Float
    -
    -instance Data.ProtoLens.HasField "f" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'f
    -              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    -
    -type instance Data.ProtoLens.Field "maybe'f" AttrValue =
    -     Prelude.Maybe Prelude.Float
    -
    -instance Data.ProtoLens.HasField "maybe'f" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'f
    -              (\ x__ y__ -> x__{_AttrValue'f = y__})
    -
    -type instance Data.ProtoLens.Field "b" AttrValue = Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "b" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'b
    -              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    -
    -type instance Data.ProtoLens.Field "maybe'b" AttrValue =
    -     Prelude.Maybe Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "maybe'b" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'b
    -              (\ x__ y__ -> x__{_AttrValue'b = y__})
    -
    -type instance Data.ProtoLens.Field "type'" AttrValue =
    -     Proto.Tensorflow.Core.Framework.Types.DataType
    -
    -instance Data.ProtoLens.HasField "type'" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'type'
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'type'" AttrValue =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.Types.DataType
    -
    -instance Data.ProtoLens.HasField "maybe'type'" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'type'
    -              (\ x__ y__ -> x__{_AttrValue'type' = y__})
    -
    -type instance Data.ProtoLens.Field "shape" AttrValue =
    -     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "shape" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'shape
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'shape" AttrValue =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "maybe'shape" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'shape
    -              (\ x__ y__ -> x__{_AttrValue'shape = y__})
    -
    -type instance Data.ProtoLens.Field "tensor" AttrValue =
    -     Proto.Tensorflow.Core.Framework.Tensor.TensorProto
    -
    -instance Data.ProtoLens.HasField "tensor" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'tensor
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'tensor" AttrValue =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.Tensor.TensorProto
    -
    -instance Data.ProtoLens.HasField "maybe'tensor" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'tensor
    -              (\ x__ y__ -> x__{_AttrValue'tensor = y__})
    -
    -type instance Data.ProtoLens.Field "list" AttrValue =
    -     AttrValue'ListValue
    -
    -instance Data.ProtoLens.HasField "list" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'list
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'list" AttrValue =
    -     Prelude.Maybe AttrValue'ListValue
    -
    -instance Data.ProtoLens.HasField "maybe'list" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'list
    -              (\ x__ y__ -> x__{_AttrValue'list = y__})
    -
    -type instance Data.ProtoLens.Field "func" AttrValue = NameAttrList
    -
    -instance Data.ProtoLens.HasField "func" AttrValue AttrValue where
    -        field _
    -          = (Prelude..) maybe'func
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'func" AttrValue =
    -     Prelude.Maybe NameAttrList
    -
    -instance Data.ProtoLens.HasField "maybe'func" AttrValue AttrValue
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'func
    -              (\ x__ y__ -> x__{_AttrValue'func = y__})
    -
    -type instance Data.ProtoLens.Field "placeholder" AttrValue =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "placeholder" AttrValue AttrValue
    -         where
    -        field _
    -          = (Prelude..) maybe'placeholder
    -              (Data.ProtoLens.maybeLens Data.ProtoLens.fieldDefault)
    -
    -type instance Data.ProtoLens.Field "maybe'placeholder" AttrValue =
    -     Prelude.Maybe Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "maybe'placeholder" AttrValue
    -         AttrValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'placeholder
    -              (\ x__ y__ -> x__{_AttrValue'placeholder = y__})
    -
    -instance Data.Default.Class.Default AttrValue where
    -        def
    -          = AttrValue{_AttrValue's = Prelude.Nothing,
    -                      _AttrValue'i = Prelude.Nothing, _AttrValue'f = Prelude.Nothing,
    -                      _AttrValue'b = Prelude.Nothing, _AttrValue'type' = Prelude.Nothing,
    -                      _AttrValue'shape = Prelude.Nothing,
    -                      _AttrValue'tensor = Prelude.Nothing,
    -                      _AttrValue'list = Prelude.Nothing,
    -                      _AttrValue'func = Prelude.Nothing,
    -                      _AttrValue'placeholder = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message AttrValue where
    -        descriptor
    -          = let s__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "s"
    -                      (Data.ProtoLens.BytesField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    -                      (Data.ProtoLens.OptionalField maybe's)
    -                i__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "i"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.OptionalField maybe'i)
    -                f__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "f"
    -                      (Data.ProtoLens.FloatField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    -                      (Data.ProtoLens.OptionalField maybe'f)
    -                b__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "b"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.OptionalField maybe'b)
    -                type'__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "type"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Types.DataType)
    -                      (Data.ProtoLens.OptionalField maybe'type')
    -                shape__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "shape"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    -                      (Data.ProtoLens.OptionalField maybe'shape)
    -                tensor__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "tensor"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
    -                      (Data.ProtoLens.OptionalField maybe'tensor)
    -                list__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "list"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor AttrValue'ListValue)
    -                      (Data.ProtoLens.OptionalField maybe'list)
    -                func__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "func"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor NameAttrList)
    -                      (Data.ProtoLens.OptionalField maybe'func)
    -                placeholder__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "placeholder"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.OptionalField maybe'placeholder)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 2, s__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, i__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, f__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, b__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, type'__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, shape__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, tensor__field_descriptor),
    -                    (Data.ProtoLens.Tag 1, list__field_descriptor),
    -                    (Data.ProtoLens.Tag 10, func__field_descriptor),
    -                    (Data.ProtoLens.Tag 9, placeholder__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("s", s__field_descriptor), ("i", i__field_descriptor),
    -                    ("f", f__field_descriptor), ("b", b__field_descriptor),
    -                    ("type", type'__field_descriptor),
    -                    ("shape", shape__field_descriptor),
    -                    ("tensor", tensor__field_descriptor),
    -                    ("list", list__field_descriptor), ("func", func__field_descriptor),
    -                    ("placeholder", placeholder__field_descriptor)])
    -
    -data AttrValue'ListValue = AttrValue'ListValue{_AttrValue'ListValue's
    -                                               :: [Data.ByteString.ByteString],
    -                                               _AttrValue'ListValue'i :: [Data.Int.Int64],
    -                                               _AttrValue'ListValue'f :: [Prelude.Float],
    -                                               _AttrValue'ListValue'b :: [Prelude.Bool],
    -                                               _AttrValue'ListValue'type' ::
    -                                               [Proto.Tensorflow.Core.Framework.Types.DataType],
    -                                               _AttrValue'ListValue'shape ::
    -                                               [Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto],
    -                                               _AttrValue'ListValue'tensor ::
    -                                               [Proto.Tensorflow.Core.Framework.Tensor.TensorProto]}
    -                         deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "s" AttrValue'ListValue =
    -     [Data.ByteString.ByteString]
    -
    -instance Data.ProtoLens.HasField "s" AttrValue'ListValue
    -         AttrValue'ListValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'ListValue's
    -              (\ x__ y__ -> x__{_AttrValue'ListValue's = y__})
    -
    -type instance Data.ProtoLens.Field "i" AttrValue'ListValue =
    -     [Data.Int.Int64]
    -
    -instance Data.ProtoLens.HasField "i" AttrValue'ListValue
    -         AttrValue'ListValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'i
    -              (\ x__ y__ -> x__{_AttrValue'ListValue'i = y__})
    -
    -type instance Data.ProtoLens.Field "f" AttrValue'ListValue =
    -     [Prelude.Float]
    -
    -instance Data.ProtoLens.HasField "f" AttrValue'ListValue
    -         AttrValue'ListValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'f
    -              (\ x__ y__ -> x__{_AttrValue'ListValue'f = y__})
    -
    -type instance Data.ProtoLens.Field "b" AttrValue'ListValue =
    -     [Prelude.Bool]
    -
    -instance Data.ProtoLens.HasField "b" AttrValue'ListValue
    -         AttrValue'ListValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'b
    -              (\ x__ y__ -> x__{_AttrValue'ListValue'b = y__})
    -
    -type instance Data.ProtoLens.Field "type'" AttrValue'ListValue =
    -     [Proto.Tensorflow.Core.Framework.Types.DataType]
    -
    -instance Data.ProtoLens.HasField "type'" AttrValue'ListValue
    -         AttrValue'ListValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'type'
    -              (\ x__ y__ -> x__{_AttrValue'ListValue'type' = y__})
    -
    -type instance Data.ProtoLens.Field "shape" AttrValue'ListValue =
    -     [Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto]
    -
    -instance Data.ProtoLens.HasField "shape" AttrValue'ListValue
    -         AttrValue'ListValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'shape
    -              (\ x__ y__ -> x__{_AttrValue'ListValue'shape = y__})
    -
    -type instance Data.ProtoLens.Field "tensor" AttrValue'ListValue =
    -     [Proto.Tensorflow.Core.Framework.Tensor.TensorProto]
    -
    -instance Data.ProtoLens.HasField "tensor" AttrValue'ListValue
    -         AttrValue'ListValue where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AttrValue'ListValue'tensor
    -              (\ x__ y__ -> x__{_AttrValue'ListValue'tensor = y__})
    -
    -instance Data.Default.Class.Default AttrValue'ListValue where
    -        def
    -          = AttrValue'ListValue{_AttrValue'ListValue's = [],
    -                                _AttrValue'ListValue'i = [], _AttrValue'ListValue'f = [],
    -                                _AttrValue'ListValue'b = [], _AttrValue'ListValue'type' = [],
    -                                _AttrValue'ListValue'shape = [], _AttrValue'ListValue'tensor = []}
    -
    -instance Data.ProtoLens.Message AttrValue'ListValue where
    -        descriptor
    -          = let s__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "s"
    -                      (Data.ProtoLens.BytesField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked s)
    -                i__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "i"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed i)
    -                f__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "f"
    -                      (Data.ProtoLens.FloatField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed f)
    -                b__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "b"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed b)
    -                type'__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "type"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Types.DataType)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed type')
    -                shape__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "shape"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked shape)
    -                tensor__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "tensor"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Tensor.TensorProto)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked tensor)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 2, s__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, i__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, f__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, b__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, type'__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, shape__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, tensor__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("s", s__field_descriptor), ("i", i__field_descriptor),
    -                    ("f", f__field_descriptor), ("b", b__field_descriptor),
    -                    ("type", type'__field_descriptor),
    -                    ("shape", shape__field_descriptor),
    -                    ("tensor", tensor__field_descriptor)])
    -
    -data NameAttrList = NameAttrList{_NameAttrList'name ::
    -                                 Data.Text.Text,
    -                                 _NameAttrList'attr :: Data.Map.Map Data.Text.Text AttrValue}
    -                  deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "name" NameAttrList =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" NameAttrList NameAttrList
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NameAttrList'name
    -              (\ x__ y__ -> x__{_NameAttrList'name = y__})
    -
    -type instance Data.ProtoLens.Field "attr" NameAttrList =
    -     Data.Map.Map Data.Text.Text AttrValue
    -
    -instance Data.ProtoLens.HasField "attr" NameAttrList NameAttrList
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NameAttrList'attr
    -              (\ x__ y__ -> x__{_NameAttrList'attr = y__})
    -
    -instance Data.Default.Class.Default NameAttrList where
    -        def
    -          = NameAttrList{_NameAttrList'name = Data.ProtoLens.fieldDefault,
    -                         _NameAttrList'attr = Data.Map.empty}
    -
    -instance Data.ProtoLens.Message NameAttrList where
    -        descriptor
    -          = let name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -                attr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "attr"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor NameAttrList'AttrEntry)
    -                      (Data.ProtoLens.MapField key value attr)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, attr__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("name", name__field_descriptor),
    -                    ("attr", attr__field_descriptor)])
    -
    -data NameAttrList'AttrEntry = NameAttrList'AttrEntry{_NameAttrList'AttrEntry'key
    -                                                     :: Data.Text.Text,
    -                                                     _NameAttrList'AttrEntry'value ::
    -                                                     Prelude.Maybe AttrValue}
    -                            deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "key" NameAttrList'AttrEntry =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "key" NameAttrList'AttrEntry
    -         NameAttrList'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NameAttrList'AttrEntry'key
    -              (\ x__ y__ -> x__{_NameAttrList'AttrEntry'key = y__})
    -
    -type instance Data.ProtoLens.Field "value" NameAttrList'AttrEntry =
    -     AttrValue
    -
    -instance Data.ProtoLens.HasField "value" NameAttrList'AttrEntry
    -         NameAttrList'AttrEntry where
    -        field _
    -          = (Prelude..) maybe'value
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'value" NameAttrList'AttrEntry =
    -     Prelude.Maybe AttrValue
    -
    -instance Data.ProtoLens.HasField "maybe'value"
    -         NameAttrList'AttrEntry NameAttrList'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NameAttrList'AttrEntry'value
    -              (\ x__ y__ -> x__{_NameAttrList'AttrEntry'value = y__})
    -
    -instance Data.Default.Class.Default NameAttrList'AttrEntry where
    -        def
    -          = NameAttrList'AttrEntry{_NameAttrList'AttrEntry'key =
    -                                     Data.ProtoLens.fieldDefault,
    -                                   _NameAttrList'AttrEntry'value = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message NameAttrList'AttrEntry where
    -        descriptor
    -          = let key__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "key"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    -                value__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "value"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor AttrValue)
    -                      (Data.ProtoLens.OptionalField maybe'value)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("key", key__field_descriptor),
    -                    ("value", value__field_descriptor)])
    -
    -attr ::
    -     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    -         (Data.ProtoLens.Field "attr" msg')
    -attr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    -
    -b ::
    -  forall msg msg' . Data.ProtoLens.HasField "b" msg msg' =>
    -    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "b" msg)
    -      (Data.ProtoLens.Field "b" msg')
    -b = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "b")
    -
    -f ::
    -  forall msg msg' . Data.ProtoLens.HasField "f" msg msg' =>
    -    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "f" msg)
    -      (Data.ProtoLens.Field "f" msg')
    -f = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "f")
    -
    -func ::
    -     forall msg msg' . Data.ProtoLens.HasField "func" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "func" msg)
    -         (Data.ProtoLens.Field "func" msg')
    -func
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "func")
    -
    -i ::
    -  forall msg msg' . Data.ProtoLens.HasField "i" msg msg' =>
    -    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "i" msg)
    -      (Data.ProtoLens.Field "i" msg')
    -i = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "i")
    -
    -key ::
    -    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    -        (Data.ProtoLens.Field "key" msg')
    -key
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    -
    -list ::
    -     forall msg msg' . Data.ProtoLens.HasField "list" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "list" msg)
    -         (Data.ProtoLens.Field "list" msg')
    -list
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "list")
    -
    -maybe'b ::
    -        forall msg msg' . Data.ProtoLens.HasField "maybe'b" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'b" msg)
    -            (Data.ProtoLens.Field "maybe'b" msg')
    -maybe'b
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'b")
    -
    -maybe'f ::
    -        forall msg msg' . Data.ProtoLens.HasField "maybe'f" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'f" msg)
    -            (Data.ProtoLens.Field "maybe'f" msg')
    -maybe'f
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'f")
    -
    -maybe'func ::
    -           forall msg msg' . Data.ProtoLens.HasField "maybe'func" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'func" msg)
    -               (Data.ProtoLens.Field "maybe'func" msg')
    -maybe'func
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'func")
    -
    -maybe'i ::
    -        forall msg msg' . Data.ProtoLens.HasField "maybe'i" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'i" msg)
    -            (Data.ProtoLens.Field "maybe'i" msg')
    -maybe'i
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'i")
    -
    -maybe'list ::
    -           forall msg msg' . Data.ProtoLens.HasField "maybe'list" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'list" msg)
    -               (Data.ProtoLens.Field "maybe'list" msg')
    -maybe'list
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'list")
    -
    -maybe'placeholder ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "maybe'placeholder" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "maybe'placeholder" msg)
    -                      (Data.ProtoLens.Field "maybe'placeholder" msg')
    -maybe'placeholder
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'placeholder")
    -
    -maybe's ::
    -        forall msg msg' . Data.ProtoLens.HasField "maybe's" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe's" msg)
    -            (Data.ProtoLens.Field "maybe's" msg')
    -maybe's
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe's")
    -
    -maybe'shape ::
    -            forall msg msg' . Data.ProtoLens.HasField "maybe'shape" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'shape" msg)
    -                (Data.ProtoLens.Field "maybe'shape" msg')
    -maybe'shape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'shape")
    -
    -maybe'tensor ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "maybe'tensor" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "maybe'tensor" msg)
    -                 (Data.ProtoLens.Field "maybe'tensor" msg')
    -maybe'tensor
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'tensor")
    -
    -maybe'type' ::
    -            forall msg msg' . Data.ProtoLens.HasField "maybe'type'" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'type'" msg)
    -                (Data.ProtoLens.Field "maybe'type'" msg')
    -maybe'type'
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'type'")
    -
    -maybe'value ::
    -            forall msg msg' . Data.ProtoLens.HasField "maybe'value" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'value" msg)
    -                (Data.ProtoLens.Field "maybe'value" msg')
    -maybe'value
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'value")
    -
    -name ::
    -     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    -         (Data.ProtoLens.Field "name" msg')
    -name
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    -
    -placeholder ::
    -            forall msg msg' . Data.ProtoLens.HasField "placeholder" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "placeholder" msg)
    -                (Data.ProtoLens.Field "placeholder" msg')
    -placeholder
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "placeholder")
    -
    -s ::
    -  forall msg msg' . Data.ProtoLens.HasField "s" msg msg' =>
    -    Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "s" msg)
    -      (Data.ProtoLens.Field "s" msg')
    -s = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "s")
    -
    -shape ::
    -      forall msg msg' . Data.ProtoLens.HasField "shape" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "shape" msg)
    -          (Data.ProtoLens.Field "shape" msg')
    -shape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "shape")
    -
    -tensor ::
    -       forall msg msg' . Data.ProtoLens.HasField "tensor" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "tensor" msg)
    -           (Data.ProtoLens.Field "tensor" msg')
    -tensor
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "tensor")
    -
    -type' ::
    -      forall msg msg' . Data.ProtoLens.HasField "type'" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "type'" msg)
    -          (Data.ProtoLens.Field "type'" msg')
    -type'
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "type'")
    -
    -value ::
    -      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    -          (Data.ProtoLens.Field "value" msg')
    -value
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    -
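Note the two lenses the module above generates for each optional field: i hides the Maybe by falling back to fieldDefault, while maybe'i exposes it. A minimal sketch under the same assumed imports as the previous example:

import Data.Default.Class (def)
import Data.Function ((&))
import Lens.Family2 ((.~), (^.))
import Proto.Tensorflow.Core.Framework.AttrValue

-- Writing through the defaulted lens stores Just 42 in _AttrValue'i.
intAttr :: AttrValue
intAttr = def & i .~ 42

main :: IO ()
main = do
  print (intAttr ^. i)                   -- 42
  print (intAttr ^. maybe'i)             -- Just 42
  print ((def :: AttrValue) ^. maybe'i)  -- Nothing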
-
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-CostGraph.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-CostGraph.html
deleted file mode 100644
index 53adfad..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-CostGraph.html
+++ /dev/null
@@ -1,570 +0,0 @@
-
-
-
-
-
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/CostGraph.hs
-
-
-
-{- This file was auto-generated from tensorflow/core/framework/cost_graph.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.CostGraph where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.TensorShape
    -import qualified Proto.Tensorflow.Core.Framework.Types
    -
    -data CostGraphDef = CostGraphDef{_CostGraphDef'node ::
    -                                 [CostGraphDef'Node]}
    -                  deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "node" CostGraphDef =
    -     [CostGraphDef'Node]
    -
    -instance Data.ProtoLens.HasField "node" CostGraphDef CostGraphDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'node
    -              (\ x__ y__ -> x__{_CostGraphDef'node = y__})
    -
    -instance Data.Default.Class.Default CostGraphDef where
    -        def = CostGraphDef{_CostGraphDef'node = []}
    -
    -instance Data.ProtoLens.Message CostGraphDef where
    -        descriptor
    -          = let node__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "node"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, node__field_descriptor)])
    -                (Data.Map.fromList [("node", node__field_descriptor)])
    -
    -data CostGraphDef'Node = CostGraphDef'Node{_CostGraphDef'Node'name
    -                                           :: Data.Text.Text,
    -                                           _CostGraphDef'Node'device :: Data.Text.Text,
    -                                           _CostGraphDef'Node'id :: Data.Int.Int32,
    -                                           _CostGraphDef'Node'inputInfo ::
    -                                           [CostGraphDef'Node'InputInfo],
    -                                           _CostGraphDef'Node'outputInfo ::
    -                                           [CostGraphDef'Node'OutputInfo],
    -                                           _CostGraphDef'Node'temporaryMemorySize :: Data.Int.Int64,
    -                                           _CostGraphDef'Node'computeCost :: Data.Int.Int64,
    -                                           _CostGraphDef'Node'isFinal :: Prelude.Bool,
    -                                           _CostGraphDef'Node'controlInput :: [Data.Int.Int32]}
    -                       deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "name" CostGraphDef'Node =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'name
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'name = y__})
    -
    -type instance Data.ProtoLens.Field "device" CostGraphDef'Node =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "device" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'device
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'device = y__})
    -
    -type instance Data.ProtoLens.Field "id" CostGraphDef'Node =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "id" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'id
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'id = y__})
    -
    -type instance Data.ProtoLens.Field "inputInfo" CostGraphDef'Node =
    -     [CostGraphDef'Node'InputInfo]
    -
    -instance Data.ProtoLens.HasField "inputInfo" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'inputInfo
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'inputInfo = y__})
    -
    -type instance Data.ProtoLens.Field "outputInfo" CostGraphDef'Node =
    -     [CostGraphDef'Node'OutputInfo]
    -
    -instance Data.ProtoLens.HasField "outputInfo" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'outputInfo
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'outputInfo = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "temporaryMemorySize" CostGraphDef'Node =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "temporaryMemorySize"
    -         CostGraphDef'Node CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _CostGraphDef'Node'temporaryMemorySize
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'temporaryMemorySize = y__})
    -
    -type instance Data.ProtoLens.Field "computeCost" CostGraphDef'Node
    -     = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "computeCost" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'computeCost
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'computeCost = y__})
    -
    -type instance Data.ProtoLens.Field "isFinal" CostGraphDef'Node =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "isFinal" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'isFinal
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'isFinal = y__})
    -
    -type instance Data.ProtoLens.Field "controlInput" CostGraphDef'Node
    -     = [Data.Int.Int32]
    -
    -instance Data.ProtoLens.HasField "controlInput" CostGraphDef'Node
    -         CostGraphDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'controlInput
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'controlInput = y__})
    -
    -instance Data.Default.Class.Default CostGraphDef'Node where
    -        def
    -          = CostGraphDef'Node{_CostGraphDef'Node'name =
    -                                Data.ProtoLens.fieldDefault,
    -                              _CostGraphDef'Node'device = Data.ProtoLens.fieldDefault,
    -                              _CostGraphDef'Node'id = Data.ProtoLens.fieldDefault,
    -                              _CostGraphDef'Node'inputInfo = [],
    -                              _CostGraphDef'Node'outputInfo = [],
    -                              _CostGraphDef'Node'temporaryMemorySize =
    -                                Data.ProtoLens.fieldDefault,
    -                              _CostGraphDef'Node'computeCost = Data.ProtoLens.fieldDefault,
    -                              _CostGraphDef'Node'isFinal = Data.ProtoLens.fieldDefault,
    -                              _CostGraphDef'Node'controlInput = []}
    -
    -instance Data.ProtoLens.Message CostGraphDef'Node where
    -        descriptor
    -          = let name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -                device__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "device"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    -                id__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "id"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional id)
    -                inputInfo__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "input_info"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node'InputInfo)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked inputInfo)
    -                outputInfo__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "output_info"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor CostGraphDef'Node'OutputInfo)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked outputInfo)
    -                temporaryMemorySize__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "temporary_memory_size"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         temporaryMemorySize)
    -                computeCost__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "compute_cost"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional computeCost)
    -                isFinal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "is_final"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isFinal)
    -                controlInput__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "control_input"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked controlInput)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, device__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, id__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, inputInfo__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, outputInfo__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, temporaryMemorySize__field_descriptor),
    -                    (Data.ProtoLens.Tag 9, computeCost__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, isFinal__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, controlInput__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("name", name__field_descriptor),
    -                    ("device", device__field_descriptor), ("id", id__field_descriptor),
    -                    ("input_info", inputInfo__field_descriptor),
    -                    ("output_info", outputInfo__field_descriptor),
    -                    ("temporary_memory_size", temporaryMemorySize__field_descriptor),
    -                    ("compute_cost", computeCost__field_descriptor),
    -                    ("is_final", isFinal__field_descriptor),
    -                    ("control_input", controlInput__field_descriptor)])
    -
    -data CostGraphDef'Node'InputInfo = CostGraphDef'Node'InputInfo{_CostGraphDef'Node'InputInfo'precedingNode
    -                                                               :: Data.Int.Int32,
    -                                                               _CostGraphDef'Node'InputInfo'precedingPort
    -                                                               :: Data.Int.Int32}
    -                                 deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "precedingNode" CostGraphDef'Node'InputInfo =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "precedingNode"
    -         CostGraphDef'Node'InputInfo CostGraphDef'Node'InputInfo where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _CostGraphDef'Node'InputInfo'precedingNode
    -              (\ x__ y__ ->
    -                 x__{_CostGraphDef'Node'InputInfo'precedingNode = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "precedingPort" CostGraphDef'Node'InputInfo =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "precedingPort"
    -         CostGraphDef'Node'InputInfo CostGraphDef'Node'InputInfo where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _CostGraphDef'Node'InputInfo'precedingPort
    -              (\ x__ y__ ->
    -                 x__{_CostGraphDef'Node'InputInfo'precedingPort = y__})
    -
    -instance Data.Default.Class.Default CostGraphDef'Node'InputInfo
    -         where
    -        def
    -          = CostGraphDef'Node'InputInfo{_CostGraphDef'Node'InputInfo'precedingNode
    -                                          = Data.ProtoLens.fieldDefault,
    -                                        _CostGraphDef'Node'InputInfo'precedingPort =
    -                                          Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message CostGraphDef'Node'InputInfo where
    -        descriptor
    -          = let precedingNode__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "preceding_node"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional precedingNode)
    -                precedingPort__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "preceding_port"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional precedingPort)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, precedingNode__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, precedingPort__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("preceding_node", precedingNode__field_descriptor),
    -                    ("preceding_port", precedingPort__field_descriptor)])
    -
    -data CostGraphDef'Node'OutputInfo = CostGraphDef'Node'OutputInfo{_CostGraphDef'Node'OutputInfo'size
    -                                                                 :: Data.Int.Int64,
    -                                                                 _CostGraphDef'Node'OutputInfo'aliasInputPort
    -                                                                 :: Data.Int.Int64,
    -                                                                 _CostGraphDef'Node'OutputInfo'shape
    -                                                                 ::
    -                                                                 Prelude.Maybe
    -                                                                   Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    -                                                                 _CostGraphDef'Node'OutputInfo'dtype
    -                                                                 ::
    -                                                                 Proto.Tensorflow.Core.Framework.Types.DataType}
    -                                  deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "size" CostGraphDef'Node'OutputInfo =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "size"
    -         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'size
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'size = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "aliasInputPort" CostGraphDef'Node'OutputInfo
    -     = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "aliasInputPort"
    -         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _CostGraphDef'Node'OutputInfo'aliasInputPort
    -              (\ x__ y__ ->
    -                 x__{_CostGraphDef'Node'OutputInfo'aliasInputPort = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "shape" CostGraphDef'Node'OutputInfo =
    -     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "shape"
    -         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    -        field _
    -          = (Prelude..) maybe'shape
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'shape" CostGraphDef'Node'OutputInfo =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "maybe'shape"
    -         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'shape
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'shape = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "dtype" CostGraphDef'Node'OutputInfo =
    -     Proto.Tensorflow.Core.Framework.Types.DataType
    -
    -instance Data.ProtoLens.HasField "dtype"
    -         CostGraphDef'Node'OutputInfo CostGraphDef'Node'OutputInfo where
    -        field _
    -          = Lens.Family2.Unchecked.lens _CostGraphDef'Node'OutputInfo'dtype
    -              (\ x__ y__ -> x__{_CostGraphDef'Node'OutputInfo'dtype = y__})
    -
    -instance Data.Default.Class.Default CostGraphDef'Node'OutputInfo
    -         where
    -        def
    -          = CostGraphDef'Node'OutputInfo{_CostGraphDef'Node'OutputInfo'size =
    -                                           Data.ProtoLens.fieldDefault,
    -                                         _CostGraphDef'Node'OutputInfo'aliasInputPort =
    -                                           Data.ProtoLens.fieldDefault,
    -                                         _CostGraphDef'Node'OutputInfo'shape = Prelude.Nothing,
    -                                         _CostGraphDef'Node'OutputInfo'dtype =
    -                                           Data.Default.Class.def}
    -
    -instance Data.ProtoLens.Message CostGraphDef'Node'OutputInfo where
    -        descriptor
    -          = let size__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "size"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional size)
    -                aliasInputPort__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "alias_input_port"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional aliasInputPort)
    -                shape__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "shape"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    -                      (Data.ProtoLens.OptionalField maybe'shape)
    -                dtype__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "dtype"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Types.DataType)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, size__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, aliasInputPort__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, shape__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, dtype__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("size", size__field_descriptor),
    -                    ("alias_input_port", aliasInputPort__field_descriptor),
    -                    ("shape", shape__field_descriptor),
    -                    ("dtype", dtype__field_descriptor)])
    -
    -aliasInputPort ::
    -               forall msg msg' .
    -                 Data.ProtoLens.HasField "aliasInputPort" msg msg' =>
    -                 Lens.Family2.Lens msg msg'
    -                   (Data.ProtoLens.Field "aliasInputPort" msg)
    -                   (Data.ProtoLens.Field "aliasInputPort" msg')
    -aliasInputPort
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "aliasInputPort")
    -
    -computeCost ::
    -            forall msg msg' . Data.ProtoLens.HasField "computeCost" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "computeCost" msg)
    -                (Data.ProtoLens.Field "computeCost" msg')
    -computeCost
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "computeCost")
    -
    -controlInput ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "controlInput" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "controlInput" msg)
    -                 (Data.ProtoLens.Field "controlInput" msg')
    -controlInput
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "controlInput")
    -
    -device ::
    -       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    -           (Data.ProtoLens.Field "device" msg')
    -device
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    -
    -dtype ::
    -      forall msg msg' . Data.ProtoLens.HasField "dtype" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dtype" msg)
    -          (Data.ProtoLens.Field "dtype" msg')
    -dtype
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dtype")
    -
    -id ::
    -   forall msg msg' . Data.ProtoLens.HasField "id" msg msg' =>
    -     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "id" msg)
    -       (Data.ProtoLens.Field "id" msg')
    -id
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "id")
    -
    -inputInfo ::
    -          forall msg msg' . Data.ProtoLens.HasField "inputInfo" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "inputInfo" msg)
    -              (Data.ProtoLens.Field "inputInfo" msg')
    -inputInfo
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "inputInfo")
    -
    -isFinal ::
    -        forall msg msg' . Data.ProtoLens.HasField "isFinal" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isFinal" msg)
    -            (Data.ProtoLens.Field "isFinal" msg')
    -isFinal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isFinal")
    -
    -maybe'shape ::
    -            forall msg msg' . Data.ProtoLens.HasField "maybe'shape" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'shape" msg)
    -                (Data.ProtoLens.Field "maybe'shape" msg')
    -maybe'shape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'shape")
    -
    -name ::
    -     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    -         (Data.ProtoLens.Field "name" msg')
    -name
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    -
    -node ::
    -     forall msg msg' . Data.ProtoLens.HasField "node" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "node" msg)
    -         (Data.ProtoLens.Field "node" msg')
    -node
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "node")
    -
    -outputInfo ::
    -           forall msg msg' . Data.ProtoLens.HasField "outputInfo" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "outputInfo" msg)
    -               (Data.ProtoLens.Field "outputInfo" msg')
    -outputInfo
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "outputInfo")
    -
    -precedingNode ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "precedingNode" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "precedingNode" msg)
    -                  (Data.ProtoLens.Field "precedingNode" msg')
    -precedingNode
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "precedingNode")
    -
    -precedingPort ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "precedingPort" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "precedingPort" msg)
    -                  (Data.ProtoLens.Field "precedingPort" msg')
    -precedingPort
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "precedingPort")
    -
    -shape ::
    -      forall msg msg' . Data.ProtoLens.HasField "shape" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "shape" msg)
    -          (Data.ProtoLens.Field "shape" msg')
    -shape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "shape")
    -
    -size ::
    -     forall msg msg' . Data.ProtoLens.HasField "size" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "size" msg)
    -         (Data.ProtoLens.Field "size" msg')
    -size
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "size")
    -
    -temporaryMemorySize ::
    -                    forall msg msg' .
    -                      Data.ProtoLens.HasField "temporaryMemorySize" msg msg' =>
    -                      Lens.Family2.Lens msg msg'
    -                        (Data.ProtoLens.Field "temporaryMemorySize" msg)
    -                        (Data.ProtoLens.Field "temporaryMemorySize" msg')
    -temporaryMemorySize
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "temporaryMemorySize")
    -
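For orientation, the CostGraph module deleted above generates one record per proto message plus HasField lenses for each field. A short sketch, outside the diff, of how those lenses compose; the node name, id, and cost below are hypothetical values:

{-# LANGUAGE OverloadedStrings #-}
import Prelude hiding (id)   -- the generated `id` lens shadows Prelude.id
import Data.Int (Int64)
import Data.Default.Class (def)
import Lens.Family2 ((&), (.~), (^.))
import Proto.Tensorflow.Core.Framework.CostGraph

-- Build a node from the Default instance, then wrap it in a CostGraphDef.
matMulNode :: CostGraphDef'Node
matMulNode =
  def & name .~ "MatMul_1"    -- hypothetical node name
      & id .~ 42              -- hypothetical node id
      & computeCost .~ 1500   -- hypothetical cost estimate

costGraph :: CostGraphDef
costGraph = def & node .~ [matMulNode]

-- Reading a field back through the same lens:
nodeCost :: Int64
nodeCost = matMulNode ^. computeCost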
-
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Function.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Function.html
deleted file mode 100644
index d321269..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Function.html
+++ /dev/null
@@ -1,668 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Function.hs
-{- This file was auto-generated from tensorflow/core/framework/function.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.Function where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.AttrValue
    -import qualified Proto.Tensorflow.Core.Framework.NodeDef
    -import qualified Proto.Tensorflow.Core.Framework.OpDef
    -
    -data FunctionDef = FunctionDef{_FunctionDef'signature ::
    -                               Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpDef,
    -                               _FunctionDef'attr ::
    -                               Data.Map.Map Data.Text.Text
    -                                 Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
    -                               _FunctionDef'node :: [FunctionDef'Node],
    -                               _FunctionDef'nodeDef ::
    -                               [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
    -                               _FunctionDef'ret :: Data.Map.Map Data.Text.Text Data.Text.Text}
    -                 deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "signature" FunctionDef =
    -     Proto.Tensorflow.Core.Framework.OpDef.OpDef
    -
    -instance Data.ProtoLens.HasField "signature" FunctionDef
    -         FunctionDef where
    -        field _
    -          = (Prelude..) maybe'signature
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'signature" FunctionDef =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.OpDef.OpDef
    -
    -instance Data.ProtoLens.HasField "maybe'signature" FunctionDef
    -         FunctionDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'signature
    -              (\ x__ y__ -> x__{_FunctionDef'signature = y__})
    -
    -type instance Data.ProtoLens.Field "attr" FunctionDef =
    -     Data.Map.Map Data.Text.Text
    -       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "attr" FunctionDef FunctionDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'attr
    -              (\ x__ y__ -> x__{_FunctionDef'attr = y__})
    -
    -type instance Data.ProtoLens.Field "node" FunctionDef =
    -     [FunctionDef'Node]
    -
    -instance Data.ProtoLens.HasField "node" FunctionDef FunctionDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'node
    -              (\ x__ y__ -> x__{_FunctionDef'node = y__})
    -
    -type instance Data.ProtoLens.Field "nodeDef" FunctionDef =
    -     [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef]
    -
    -instance Data.ProtoLens.HasField "nodeDef" FunctionDef FunctionDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'nodeDef
    -              (\ x__ y__ -> x__{_FunctionDef'nodeDef = y__})
    -
    -type instance Data.ProtoLens.Field "ret" FunctionDef =
    -     Data.Map.Map Data.Text.Text Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "ret" FunctionDef FunctionDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'ret
    -              (\ x__ y__ -> x__{_FunctionDef'ret = y__})
    -
    -instance Data.Default.Class.Default FunctionDef where
    -        def
    -          = FunctionDef{_FunctionDef'signature = Prelude.Nothing,
    -                        _FunctionDef'attr = Data.Map.empty, _FunctionDef'node = [],
    -                        _FunctionDef'nodeDef = [], _FunctionDef'ret = Data.Map.empty}
    -
    -instance Data.ProtoLens.Message FunctionDef where
    -        descriptor
    -          = let signature__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "signature"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.OpDef.OpDef)
    -                      (Data.ProtoLens.OptionalField maybe'signature)
    -                attr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "attr"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'AttrEntry)
    -                      (Data.ProtoLens.MapField key value attr)
    -                node__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "node"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'Node)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
    -                nodeDef__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "node_def"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.NodeDef.NodeDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked nodeDef)
    -                ret__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "ret"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'RetEntry)
    -                      (Data.ProtoLens.MapField key value ret)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, signature__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, attr__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, node__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, nodeDef__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, ret__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("signature", signature__field_descriptor),
    -                    ("attr", attr__field_descriptor), ("node", node__field_descriptor),
    -                    ("node_def", nodeDef__field_descriptor),
    -                    ("ret", ret__field_descriptor)])
    -
    -data FunctionDef'AttrEntry = FunctionDef'AttrEntry{_FunctionDef'AttrEntry'key
    -                                                   :: Data.Text.Text,
    -                                                   _FunctionDef'AttrEntry'value ::
    -                                                   Prelude.Maybe
    -                                                     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    -                           deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "key" FunctionDef'AttrEntry =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "key" FunctionDef'AttrEntry
    -         FunctionDef'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'AttrEntry'key
    -              (\ x__ y__ -> x__{_FunctionDef'AttrEntry'key = y__})
    -
    -type instance Data.ProtoLens.Field "value" FunctionDef'AttrEntry =
    -     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "value" FunctionDef'AttrEntry
    -         FunctionDef'AttrEntry where
    -        field _
    -          = (Prelude..) maybe'value
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'value" FunctionDef'AttrEntry =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "maybe'value"
    -         FunctionDef'AttrEntry FunctionDef'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'AttrEntry'value
    -              (\ x__ y__ -> x__{_FunctionDef'AttrEntry'value = y__})
    -
    -instance Data.Default.Class.Default FunctionDef'AttrEntry where
    -        def
    -          = FunctionDef'AttrEntry{_FunctionDef'AttrEntry'key =
    -                                    Data.ProtoLens.fieldDefault,
    -                                  _FunctionDef'AttrEntry'value = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message FunctionDef'AttrEntry where
    -        descriptor
    -          = let key__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "key"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    -                value__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "value"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    -                      (Data.ProtoLens.OptionalField maybe'value)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("key", key__field_descriptor),
    -                    ("value", value__field_descriptor)])
    -
    -data FunctionDef'Node = FunctionDef'Node{_FunctionDef'Node'ret ::
    -                                         [Data.Text.Text],
    -                                         _FunctionDef'Node'op :: Data.Text.Text,
    -                                         _FunctionDef'Node'arg :: [Data.Text.Text],
    -                                         _FunctionDef'Node'dep :: [Data.Text.Text],
    -                                         _FunctionDef'Node'attr ::
    -                                         Data.Map.Map Data.Text.Text
    -                                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    -                      deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "ret" FunctionDef'Node =
    -     [Data.Text.Text]
    -
    -instance Data.ProtoLens.HasField "ret" FunctionDef'Node
    -         FunctionDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'Node'ret
    -              (\ x__ y__ -> x__{_FunctionDef'Node'ret = y__})
    -
    -type instance Data.ProtoLens.Field "op" FunctionDef'Node =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "op" FunctionDef'Node
    -         FunctionDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'Node'op
    -              (\ x__ y__ -> x__{_FunctionDef'Node'op = y__})
    -
    -type instance Data.ProtoLens.Field "arg" FunctionDef'Node =
    -     [Data.Text.Text]
    -
    -instance Data.ProtoLens.HasField "arg" FunctionDef'Node
    -         FunctionDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'Node'arg
    -              (\ x__ y__ -> x__{_FunctionDef'Node'arg = y__})
    -
    -type instance Data.ProtoLens.Field "dep" FunctionDef'Node =
    -     [Data.Text.Text]
    -
    -instance Data.ProtoLens.HasField "dep" FunctionDef'Node
    -         FunctionDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'Node'dep
    -              (\ x__ y__ -> x__{_FunctionDef'Node'dep = y__})
    -
    -type instance Data.ProtoLens.Field "attr" FunctionDef'Node =
    -     Data.Map.Map Data.Text.Text
    -       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "attr" FunctionDef'Node
    -         FunctionDef'Node where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'Node'attr
    -              (\ x__ y__ -> x__{_FunctionDef'Node'attr = y__})
    -
    -instance Data.Default.Class.Default FunctionDef'Node where
    -        def
    -          = FunctionDef'Node{_FunctionDef'Node'ret = [],
    -                             _FunctionDef'Node'op = Data.ProtoLens.fieldDefault,
    -                             _FunctionDef'Node'arg = [], _FunctionDef'Node'dep = [],
    -                             _FunctionDef'Node'attr = Data.Map.empty}
    -
    -instance Data.ProtoLens.Message FunctionDef'Node where
    -        descriptor
    -          = let ret__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "ret"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked ret)
    -                op__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "op"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional op)
    -                arg__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "arg"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked arg)
    -                dep__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "dep"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked dep)
    -                attr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "attr"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor FunctionDef'Node'AttrEntry)
    -                      (Data.ProtoLens.MapField key value attr)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, ret__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, op__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, arg__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, dep__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, attr__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("ret", ret__field_descriptor), ("op", op__field_descriptor),
    -                    ("arg", arg__field_descriptor), ("dep", dep__field_descriptor),
    -                    ("attr", attr__field_descriptor)])
    -
    -data FunctionDef'Node'AttrEntry = FunctionDef'Node'AttrEntry{_FunctionDef'Node'AttrEntry'key
    -                                                             :: Data.Text.Text,
    -                                                             _FunctionDef'Node'AttrEntry'value ::
    -                                                             Prelude.Maybe
    -                                                               Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    -                                deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "key" FunctionDef'Node'AttrEntry
    -     = Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "key" FunctionDef'Node'AttrEntry
    -         FunctionDef'Node'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'Node'AttrEntry'key
    -              (\ x__ y__ -> x__{_FunctionDef'Node'AttrEntry'key = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "value" FunctionDef'Node'AttrEntry =
    -     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "value" FunctionDef'Node'AttrEntry
    -         FunctionDef'Node'AttrEntry where
    -        field _
    -          = (Prelude..) maybe'value
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'value" FunctionDef'Node'AttrEntry =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "maybe'value"
    -         FunctionDef'Node'AttrEntry FunctionDef'Node'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'Node'AttrEntry'value
    -              (\ x__ y__ -> x__{_FunctionDef'Node'AttrEntry'value = y__})
    -
    -instance Data.Default.Class.Default FunctionDef'Node'AttrEntry
    -         where
    -        def
    -          = FunctionDef'Node'AttrEntry{_FunctionDef'Node'AttrEntry'key =
    -                                         Data.ProtoLens.fieldDefault,
    -                                       _FunctionDef'Node'AttrEntry'value = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message FunctionDef'Node'AttrEntry where
    -        descriptor
    -          = let key__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "key"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    -                value__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "value"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    -                      (Data.ProtoLens.OptionalField maybe'value)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("key", key__field_descriptor),
    -                    ("value", value__field_descriptor)])
    -
    -data FunctionDef'RetEntry = FunctionDef'RetEntry{_FunctionDef'RetEntry'key
    -                                                 :: Data.Text.Text,
    -                                                 _FunctionDef'RetEntry'value :: Data.Text.Text}
    -                          deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "key" FunctionDef'RetEntry =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "key" FunctionDef'RetEntry
    -         FunctionDef'RetEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'RetEntry'key
    -              (\ x__ y__ -> x__{_FunctionDef'RetEntry'key = y__})
    -
    -type instance Data.ProtoLens.Field "value" FunctionDef'RetEntry =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "value" FunctionDef'RetEntry
    -         FunctionDef'RetEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDef'RetEntry'value
    -              (\ x__ y__ -> x__{_FunctionDef'RetEntry'value = y__})
    -
    -instance Data.Default.Class.Default FunctionDef'RetEntry where
    -        def
    -          = FunctionDef'RetEntry{_FunctionDef'RetEntry'key =
    -                                   Data.ProtoLens.fieldDefault,
    -                                 _FunctionDef'RetEntry'value = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message FunctionDef'RetEntry where
    -        descriptor
    -          = let key__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "key"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    -                value__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "value"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("key", key__field_descriptor),
    -                    ("value", value__field_descriptor)])
    -
    -data FunctionDefLibrary = FunctionDefLibrary{_FunctionDefLibrary'function
    -                                             :: [FunctionDef],
    -                                             _FunctionDefLibrary'gradient :: [GradientDef]}
    -                        deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "function" FunctionDefLibrary =
    -     [FunctionDef]
    -
    -instance Data.ProtoLens.HasField "function" FunctionDefLibrary
    -         FunctionDefLibrary where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDefLibrary'function
    -              (\ x__ y__ -> x__{_FunctionDefLibrary'function = y__})
    -
    -type instance Data.ProtoLens.Field "gradient" FunctionDefLibrary =
    -     [GradientDef]
    -
    -instance Data.ProtoLens.HasField "gradient" FunctionDefLibrary
    -         FunctionDefLibrary where
    -        field _
    -          = Lens.Family2.Unchecked.lens _FunctionDefLibrary'gradient
    -              (\ x__ y__ -> x__{_FunctionDefLibrary'gradient = y__})
    -
    -instance Data.Default.Class.Default FunctionDefLibrary where
    -        def
    -          = FunctionDefLibrary{_FunctionDefLibrary'function = [],
    -                               _FunctionDefLibrary'gradient = []}
    -
    -instance Data.ProtoLens.Message FunctionDefLibrary where
    -        descriptor
    -          = let function__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "function"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor FunctionDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked function)
    -                gradient__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "gradient"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor GradientDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked gradient)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, function__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, gradient__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("function", function__field_descriptor),
    -                    ("gradient", gradient__field_descriptor)])
    -
    -data GradientDef = GradientDef{_GradientDef'functionName ::
    -                               Data.Text.Text,
    -                               _GradientDef'gradientFunc :: Data.Text.Text}
    -                 deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "functionName" GradientDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "functionName" GradientDef
    -         GradientDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GradientDef'functionName
    -              (\ x__ y__ -> x__{_GradientDef'functionName = y__})
    -
    -type instance Data.ProtoLens.Field "gradientFunc" GradientDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "gradientFunc" GradientDef
    -         GradientDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GradientDef'gradientFunc
    -              (\ x__ y__ -> x__{_GradientDef'gradientFunc = y__})
    -
    -instance Data.Default.Class.Default GradientDef where
    -        def
    -          = GradientDef{_GradientDef'functionName =
    -                          Data.ProtoLens.fieldDefault,
    -                        _GradientDef'gradientFunc = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message GradientDef where
    -        descriptor
    -          = let functionName__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "function_name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional functionName)
    -                gradientFunc__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "gradient_func"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional gradientFunc)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, functionName__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, gradientFunc__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("function_name", functionName__field_descriptor),
    -                    ("gradient_func", gradientFunc__field_descriptor)])
    -
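The remaining deleted definitions below are this module's top-level field lenses. A sketch, not part of the diff, of associating a function with its gradient via GradientDef and collecting it in a FunctionDefLibrary; both function names are hypothetical:

{-# LANGUAGE OverloadedStrings #-}
import Data.Default.Class (def)
import Lens.Family2 ((&), (.~))
import Proto.Tensorflow.Core.Framework.Function

-- Pair a function name with the name of its gradient function.
exampleGradient :: GradientDef
exampleGradient =
  def & functionName .~ "MyFunc"       -- hypothetical function name
      & gradientFunc .~ "MyFuncGrad"   -- hypothetical gradient name

exampleLibrary :: FunctionDefLibrary
exampleLibrary = def & gradient .~ [exampleGradient]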
    -arg ::
    -    forall msg msg' . Data.ProtoLens.HasField "arg" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "arg" msg)
    -        (Data.ProtoLens.Field "arg" msg')
    -arg
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "arg")
    -
    -attr ::
    -     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    -         (Data.ProtoLens.Field "attr" msg')
    -attr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    -
    -dep ::
    -    forall msg msg' . Data.ProtoLens.HasField "dep" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dep" msg)
    -        (Data.ProtoLens.Field "dep" msg')
    -dep
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dep")
    -
    -function ::
    -         forall msg msg' . Data.ProtoLens.HasField "function" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "function" msg)
    -             (Data.ProtoLens.Field "function" msg')
    -function
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "function")
    -
    -functionName ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "functionName" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "functionName" msg)
    -                 (Data.ProtoLens.Field "functionName" msg')
    -functionName
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "functionName")
    -
    -gradient ::
    -         forall msg msg' . Data.ProtoLens.HasField "gradient" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "gradient" msg)
    -             (Data.ProtoLens.Field "gradient" msg')
    -gradient
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "gradient")
    -
    -gradientFunc ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "gradientFunc" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "gradientFunc" msg)
    -                 (Data.ProtoLens.Field "gradientFunc" msg')
    -gradientFunc
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "gradientFunc")
    -
    -key ::
    -    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    -        (Data.ProtoLens.Field "key" msg')
    -key
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    -
    -maybe'signature ::
    -                forall msg msg' .
    -                  Data.ProtoLens.HasField "maybe'signature" msg msg' =>
    -                  Lens.Family2.Lens msg msg'
    -                    (Data.ProtoLens.Field "maybe'signature" msg)
    -                    (Data.ProtoLens.Field "maybe'signature" msg')
    -maybe'signature
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'signature")
    -
    -maybe'value ::
    -            forall msg msg' . Data.ProtoLens.HasField "maybe'value" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'value" msg)
    -                (Data.ProtoLens.Field "maybe'value" msg')
    -maybe'value
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'value")
    -
    -node ::
    -     forall msg msg' . Data.ProtoLens.HasField "node" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "node" msg)
    -         (Data.ProtoLens.Field "node" msg')
    -node
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "node")
    -
    -nodeDef ::
    -        forall msg msg' . Data.ProtoLens.HasField "nodeDef" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeDef" msg)
    -            (Data.ProtoLens.Field "nodeDef" msg')
    -nodeDef
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeDef")
    -
    -op ::
    -   forall msg msg' . Data.ProtoLens.HasField "op" msg msg' =>
    -     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "op" msg)
    -       (Data.ProtoLens.Field "op" msg')
    -op
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "op")
    -
    -ret ::
    -    forall msg msg' . Data.ProtoLens.HasField "ret" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "ret" msg)
    -        (Data.ProtoLens.Field "ret" msg')
    -ret
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "ret")
    -
    -signature ::
    -          forall msg msg' . Data.ProtoLens.HasField "signature" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "signature" msg)
    -              (Data.ProtoLens.Field "signature" msg')
    -signature
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "signature")
    -
    -value ::
    -      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    -          (Data.ProtoLens.Field "value" msg')
    -value
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    -
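Every top-level accessor deleted above follows the same proto-lens 0.1 pattern: a `Field` type instance fixes the field's type, a `HasField` instance supplies the record lens, and the exported name (`functionName`, `gradientFunc`, ...) dispatches through a `ProxySym` type-level symbol so one name works for every message carrying that field. A minimal usage sketch, assuming the deleted Function module's exports plus lens-family's `Lens.Family2` operators (the value names below are illustrative, not part of the package):

    import qualified Data.Text as Text
    import Data.Default.Class (def)
    import Lens.Family2 ((&), (.~), (^.))
    import Proto.Tensorflow.Core.Framework.Function
             (GradientDef, functionName, gradientFunc)

    -- Start from the Default instance and set both string fields.
    myGradient :: GradientDef
    myGradient =
        def & functionName .~ Text.pack "Foo"
            & gradientFunc .~ Text.pack "FooGrad"

    -- The same lens reads the field back.
    gradName :: Text.Text
    gradName = myGradient ^. functionName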
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Graph.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Graph.html
deleted file mode 100644
index fea1716..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Graph.html
+++ /dev/null
@@ -1,198 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Graph.hs
-{- This file was auto-generated from tensorflow/core/framework/graph.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.Graph where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.Function
    -import qualified Proto.Tensorflow.Core.Framework.NodeDef
    -import qualified Proto.Tensorflow.Core.Framework.Versions
    -
    -data GraphDef = GraphDef{_GraphDef'node ::
    -                         [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef],
    -                         _GraphDef'versions ::
    -                         Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef,
    -                         _GraphDef'version :: Data.Int.Int32,
    -                         _GraphDef'library ::
    -                         Prelude.Maybe
    -                           Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary}
    -              deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "node" GraphDef =
    -     [Proto.Tensorflow.Core.Framework.NodeDef.NodeDef]
    -
    -instance Data.ProtoLens.HasField "node" GraphDef GraphDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphDef'node
    -              (\ x__ y__ -> x__{_GraphDef'node = y__})
    -
    -type instance Data.ProtoLens.Field "versions" GraphDef =
    -     Proto.Tensorflow.Core.Framework.Versions.VersionDef
    -
    -instance Data.ProtoLens.HasField "versions" GraphDef GraphDef where
    -        field _
    -          = (Prelude..) maybe'versions
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'versions" GraphDef =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.Versions.VersionDef
    -
    -instance Data.ProtoLens.HasField "maybe'versions" GraphDef GraphDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphDef'versions
    -              (\ x__ y__ -> x__{_GraphDef'versions = y__})
    -
    -type instance Data.ProtoLens.Field "version" GraphDef =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "version" GraphDef GraphDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphDef'version
    -              (\ x__ y__ -> x__{_GraphDef'version = y__})
    -
    -type instance Data.ProtoLens.Field "library" GraphDef =
    -     Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary
    -
    -instance Data.ProtoLens.HasField "library" GraphDef GraphDef where
    -        field _
    -          = (Prelude..) maybe'library
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'library" GraphDef =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary
    -
    -instance Data.ProtoLens.HasField "maybe'library" GraphDef GraphDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphDef'library
    -              (\ x__ y__ -> x__{_GraphDef'library = y__})
    -
    -instance Data.Default.Class.Default GraphDef where
    -        def
    -          = GraphDef{_GraphDef'node = [],
    -                     _GraphDef'versions = Prelude.Nothing,
    -                     _GraphDef'version = Data.ProtoLens.fieldDefault,
    -                     _GraphDef'library = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message GraphDef where
    -        descriptor
    -          = let node__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "node"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.NodeDef.NodeDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked node)
    -                versions__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "versions"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Versions.VersionDef)
    -                      (Data.ProtoLens.OptionalField maybe'versions)
    -                version__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "version"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
    -                library__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "library"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Function.FunctionDefLibrary)
    -                      (Data.ProtoLens.OptionalField maybe'library)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, node__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, versions__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, version__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, library__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("node", node__field_descriptor),
    -                    ("versions", versions__field_descriptor),
    -                    ("version", version__field_descriptor),
    -                    ("library", library__field_descriptor)])
    -
    -library ::
    -        forall msg msg' . Data.ProtoLens.HasField "library" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "library" msg)
    -            (Data.ProtoLens.Field "library" msg')
    -library
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "library")
    -
    -maybe'library ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "maybe'library" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "maybe'library" msg)
    -                  (Data.ProtoLens.Field "maybe'library" msg')
    -maybe'library
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'library")
    -
    -maybe'versions ::
    -               forall msg msg' .
    -                 Data.ProtoLens.HasField "maybe'versions" msg msg' =>
    -                 Lens.Family2.Lens msg msg'
    -                   (Data.ProtoLens.Field "maybe'versions" msg)
    -                   (Data.ProtoLens.Field "maybe'versions" msg')
    -maybe'versions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'versions")
    -
    -node ::
    -     forall msg msg' . Data.ProtoLens.HasField "node" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "node" msg)
    -         (Data.ProtoLens.Field "node" msg')
    -node
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "node")
    -
    -version ::
    -        forall msg msg' . Data.ProtoLens.HasField "version" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "version" msg)
    -            (Data.ProtoLens.Field "version" msg')
    -version
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "version")
    -
    -versions ::
    -         forall msg msg' . Data.ProtoLens.HasField "versions" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "versions" msg)
    -             (Data.ProtoLens.Field "versions" msg')
    -versions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "versions")
    -
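In the deleted Graph module, each optional submessage field gets paired lenses: `versions` composes `maybe'versions` with `maybeLens def`, so reads substitute a default `VersionDef` when the field is unset, while `maybe'versions` exposes the raw `Maybe`. A sketch of the distinction, assuming the module above plus lens-family (the values here are illustrative):

    import Data.Default.Class (def)
    import Data.Maybe (isNothing)
    import Lens.Family2 ((&), (.~), (^.))
    import Proto.Tensorflow.Core.Framework.Graph
             (GraphDef, maybe'versions, version)

    -- A GraphDef with only 'version' set; 'node' stays [] and the
    -- submessages stay Nothing, per the Default instance above.
    g :: GraphDef
    g = def & version .~ 21

    -- 'maybe'versions' reveals that no VersionDef was ever written,
    -- even though 'g ^. versions' would return 'def'.
    versionsUnset :: Bool
    versionsUnset = isNothing (g ^. maybe'versions)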
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-NodeDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-NodeDef.html
deleted file mode 100644
index 2062118..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-NodeDef.html
+++ /dev/null
@@ -1,257 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/NodeDef.hs
-{- This file was auto-generated from tensorflow/core/framework/node_def.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.NodeDef where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.AttrValue
    -
    -data NodeDef = NodeDef{_NodeDef'name :: Data.Text.Text,
    -                       _NodeDef'op :: Data.Text.Text, _NodeDef'input :: [Data.Text.Text],
    -                       _NodeDef'device :: Data.Text.Text,
    -                       _NodeDef'attr ::
    -                       Data.Map.Map Data.Text.Text
    -                         Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    -             deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "name" NodeDef = Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" NodeDef NodeDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeDef'name
    -              (\ x__ y__ -> x__{_NodeDef'name = y__})
    -
    -type instance Data.ProtoLens.Field "op" NodeDef = Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "op" NodeDef NodeDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeDef'op
    -              (\ x__ y__ -> x__{_NodeDef'op = y__})
    -
    -type instance Data.ProtoLens.Field "input" NodeDef =
    -     [Data.Text.Text]
    -
    -instance Data.ProtoLens.HasField "input" NodeDef NodeDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeDef'input
    -              (\ x__ y__ -> x__{_NodeDef'input = y__})
    -
    -type instance Data.ProtoLens.Field "device" NodeDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "device" NodeDef NodeDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeDef'device
    -              (\ x__ y__ -> x__{_NodeDef'device = y__})
    -
    -type instance Data.ProtoLens.Field "attr" NodeDef =
    -     Data.Map.Map Data.Text.Text
    -       Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "attr" NodeDef NodeDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeDef'attr
    -              (\ x__ y__ -> x__{_NodeDef'attr = y__})
    -
    -instance Data.Default.Class.Default NodeDef where
    -        def
    -          = NodeDef{_NodeDef'name = Data.ProtoLens.fieldDefault,
    -                    _NodeDef'op = Data.ProtoLens.fieldDefault, _NodeDef'input = [],
    -                    _NodeDef'device = Data.ProtoLens.fieldDefault,
    -                    _NodeDef'attr = Data.Map.empty}
    -
    -instance Data.ProtoLens.Message NodeDef where
    -        descriptor
    -          = let name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -                op__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "op"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional op)
    -                input__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "input"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked input)
    -                device__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "device"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    -                attr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "attr"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor NodeDef'AttrEntry)
    -                      (Data.ProtoLens.MapField key value attr)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, op__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, input__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, device__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, attr__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("name", name__field_descriptor), ("op", op__field_descriptor),
    -                    ("input", input__field_descriptor),
    -                    ("device", device__field_descriptor),
    -                    ("attr", attr__field_descriptor)])
    -
    -data NodeDef'AttrEntry = NodeDef'AttrEntry{_NodeDef'AttrEntry'key
    -                                           :: Data.Text.Text,
    -                                           _NodeDef'AttrEntry'value ::
    -                                           Prelude.Maybe
    -                                             Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    -                       deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "key" NodeDef'AttrEntry =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "key" NodeDef'AttrEntry
    -         NodeDef'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeDef'AttrEntry'key
    -              (\ x__ y__ -> x__{_NodeDef'AttrEntry'key = y__})
    -
    -type instance Data.ProtoLens.Field "value" NodeDef'AttrEntry =
    -     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "value" NodeDef'AttrEntry
    -         NodeDef'AttrEntry where
    -        field _
    -          = (Prelude..) maybe'value
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'value" NodeDef'AttrEntry
    -     = Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "maybe'value" NodeDef'AttrEntry
    -         NodeDef'AttrEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeDef'AttrEntry'value
    -              (\ x__ y__ -> x__{_NodeDef'AttrEntry'value = y__})
    -
    -instance Data.Default.Class.Default NodeDef'AttrEntry where
    -        def
    -          = NodeDef'AttrEntry{_NodeDef'AttrEntry'key =
    -                                Data.ProtoLens.fieldDefault,
    -                              _NodeDef'AttrEntry'value = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message NodeDef'AttrEntry where
    -        descriptor
    -          = let key__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "key"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    -                value__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "value"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    -                      (Data.ProtoLens.OptionalField maybe'value)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("key", key__field_descriptor),
    -                    ("value", value__field_descriptor)])
    -
    -attr ::
    -     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    -         (Data.ProtoLens.Field "attr" msg')
    -attr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    -
    -device ::
    -       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    -           (Data.ProtoLens.Field "device" msg')
    -device
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    -
    -input ::
    -      forall msg msg' . Data.ProtoLens.HasField "input" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "input" msg)
    -          (Data.ProtoLens.Field "input" msg')
    -input
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "input")
    -
    -key ::
    -    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    -        (Data.ProtoLens.Field "key" msg')
    -key
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    -
    -maybe'value ::
    -            forall msg msg' . Data.ProtoLens.HasField "maybe'value" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'value" msg)
    -                (Data.ProtoLens.Field "maybe'value" msg')
    -maybe'value
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'value")
    -
    -name ::
    -     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    -         (Data.ProtoLens.Field "name" msg')
    -name
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    -
    -op ::
    -   forall msg msg' . Data.ProtoLens.HasField "op" msg msg' =>
    -     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "op" msg)
    -       (Data.ProtoLens.Field "op" msg')
    -op
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "op")
    -
    -value ::
    -      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    -          (Data.ProtoLens.Field "value" msg')
    -value
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    -
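The deleted NodeDef module renders the proto `map<string, AttrValue>` field as a `Data.Map`, with `NodeDef'AttrEntry` generated purely as the wire-format key/value entry message; the `attr` lens operates on the whole map. A sketch of populating it, assuming the module above together with the corresponding generated AttrValue module (the `i` accessor and the node below are illustrative assumptions):

    import qualified Data.Map as Map
    import qualified Data.Text as Text
    import Data.Default.Class (def)
    import Lens.Family2 ((&), (.~))
    import Proto.Tensorflow.Core.Framework.AttrValue (AttrValue, i)
    import Proto.Tensorflow.Core.Framework.NodeDef (NodeDef, attr, name, op)

    -- An integer-valued attribute, built through AttrValue's 'i' lens
    -- (assumed generated alongside the _AttrValue'i field).
    twoAttr :: AttrValue
    twoAttr = def & i .~ 2

    -- The 'attr' lens sets the whole attribute map at once; the
    -- AttrEntry message never appears in user code.
    constNode :: NodeDef
    constNode =
        def & name .~ Text.pack "x"
            & op   .~ Text.pack "Const"
            & attr .~ Map.fromList [(Text.pack "N", twoAttr)]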
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-OpDef.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-OpDef.html
deleted file mode 100644
index 725afc5..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-OpDef.html
+++ /dev/null
@@ -1,854 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/OpDef.hs
-{- This file was auto-generated from tensorflow/core/framework/op_def.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.OpDef where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.AttrValue
    -import qualified Proto.Tensorflow.Core.Framework.Types
    -
    -data OpDef = OpDef{_OpDef'name :: Data.Text.Text,
    -                   _OpDef'inputArg :: [OpDef'ArgDef],
    -                   _OpDef'outputArg :: [OpDef'ArgDef], _OpDef'attr :: [OpDef'AttrDef],
    -                   _OpDef'deprecation :: Prelude.Maybe OpDeprecation,
    -                   _OpDef'summary :: Data.Text.Text,
    -                   _OpDef'description :: Data.Text.Text,
    -                   _OpDef'isCommutative :: Prelude.Bool,
    -                   _OpDef'isAggregate :: Prelude.Bool,
    -                   _OpDef'isStateful :: Prelude.Bool,
    -                   _OpDef'allowsUninitializedInput :: Prelude.Bool}
    -           deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "name" OpDef = Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'name
    -              (\ x__ y__ -> x__{_OpDef'name = y__})
    -
    -type instance Data.ProtoLens.Field "inputArg" OpDef =
    -     [OpDef'ArgDef]
    -
    -instance Data.ProtoLens.HasField "inputArg" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'inputArg
    -              (\ x__ y__ -> x__{_OpDef'inputArg = y__})
    -
    -type instance Data.ProtoLens.Field "outputArg" OpDef =
    -     [OpDef'ArgDef]
    -
    -instance Data.ProtoLens.HasField "outputArg" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'outputArg
    -              (\ x__ y__ -> x__{_OpDef'outputArg = y__})
    -
    -type instance Data.ProtoLens.Field "attr" OpDef = [OpDef'AttrDef]
    -
    -instance Data.ProtoLens.HasField "attr" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'attr
    -              (\ x__ y__ -> x__{_OpDef'attr = y__})
    -
    -type instance Data.ProtoLens.Field "deprecation" OpDef =
    -     OpDeprecation
    -
    -instance Data.ProtoLens.HasField "deprecation" OpDef OpDef where
    -        field _
    -          = (Prelude..) maybe'deprecation
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'deprecation" OpDef =
    -     Prelude.Maybe OpDeprecation
    -
    -instance Data.ProtoLens.HasField "maybe'deprecation" OpDef OpDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'deprecation
    -              (\ x__ y__ -> x__{_OpDef'deprecation = y__})
    -
    -type instance Data.ProtoLens.Field "summary" OpDef = Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "summary" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'summary
    -              (\ x__ y__ -> x__{_OpDef'summary = y__})
    -
    -type instance Data.ProtoLens.Field "description" OpDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "description" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'description
    -              (\ x__ y__ -> x__{_OpDef'description = y__})
    -
    -type instance Data.ProtoLens.Field "isCommutative" OpDef =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "isCommutative" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'isCommutative
    -              (\ x__ y__ -> x__{_OpDef'isCommutative = y__})
    -
    -type instance Data.ProtoLens.Field "isAggregate" OpDef =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "isAggregate" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'isAggregate
    -              (\ x__ y__ -> x__{_OpDef'isAggregate = y__})
    -
    -type instance Data.ProtoLens.Field "isStateful" OpDef =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "isStateful" OpDef OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'isStateful
    -              (\ x__ y__ -> x__{_OpDef'isStateful = y__})
    -
    -type instance Data.ProtoLens.Field "allowsUninitializedInput" OpDef
    -     = Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "allowsUninitializedInput" OpDef
    -         OpDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'allowsUninitializedInput
    -              (\ x__ y__ -> x__{_OpDef'allowsUninitializedInput = y__})
    -
    -instance Data.Default.Class.Default OpDef where
    -        def
    -          = OpDef{_OpDef'name = Data.ProtoLens.fieldDefault,
    -                  _OpDef'inputArg = [], _OpDef'outputArg = [], _OpDef'attr = [],
    -                  _OpDef'deprecation = Prelude.Nothing,
    -                  _OpDef'summary = Data.ProtoLens.fieldDefault,
    -                  _OpDef'description = Data.ProtoLens.fieldDefault,
    -                  _OpDef'isCommutative = Data.ProtoLens.fieldDefault,
    -                  _OpDef'isAggregate = Data.ProtoLens.fieldDefault,
    -                  _OpDef'isStateful = Data.ProtoLens.fieldDefault,
    -                  _OpDef'allowsUninitializedInput = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message OpDef where
    -        descriptor
    -          = let name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -                inputArg__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "input_arg"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor OpDef'ArgDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked inputArg)
    -                outputArg__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "output_arg"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor OpDef'ArgDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked outputArg)
    -                attr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "attr"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor OpDef'AttrDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked attr)
    -                deprecation__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "deprecation"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor OpDeprecation)
    -                      (Data.ProtoLens.OptionalField maybe'deprecation)
    -                summary__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "summary"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional summary)
    -                description__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "description"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
    -                isCommutative__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "is_commutative"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isCommutative)
    -                isAggregate__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "is_aggregate"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isAggregate)
    -                isStateful__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "is_stateful"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isStateful)
    -                allowsUninitializedInput__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allows_uninitialized_input"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         allowsUninitializedInput)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, inputArg__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, outputArg__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, attr__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, deprecation__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, summary__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, description__field_descriptor),
    -                    (Data.ProtoLens.Tag 18, isCommutative__field_descriptor),
    -                    (Data.ProtoLens.Tag 16, isAggregate__field_descriptor),
    -                    (Data.ProtoLens.Tag 17, isStateful__field_descriptor),
    -                    (Data.ProtoLens.Tag 19,
    -                     allowsUninitializedInput__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("name", name__field_descriptor),
    -                    ("input_arg", inputArg__field_descriptor),
    -                    ("output_arg", outputArg__field_descriptor),
    -                    ("attr", attr__field_descriptor),
    -                    ("deprecation", deprecation__field_descriptor),
    -                    ("summary", summary__field_descriptor),
    -                    ("description", description__field_descriptor),
    -                    ("is_commutative", isCommutative__field_descriptor),
    -                    ("is_aggregate", isAggregate__field_descriptor),
    -                    ("is_stateful", isStateful__field_descriptor),
    -                    ("allows_uninitialized_input",
    -                     allowsUninitializedInput__field_descriptor)])
    -
    -data OpDef'ArgDef = OpDef'ArgDef{_OpDef'ArgDef'name ::
    -                                 Data.Text.Text,
    -                                 _OpDef'ArgDef'description :: Data.Text.Text,
    -                                 _OpDef'ArgDef'type' ::
    -                                 Proto.Tensorflow.Core.Framework.Types.DataType,
    -                                 _OpDef'ArgDef'typeAttr :: Data.Text.Text,
    -                                 _OpDef'ArgDef'numberAttr :: Data.Text.Text,
    -                                 _OpDef'ArgDef'typeListAttr :: Data.Text.Text,
    -                                 _OpDef'ArgDef'isRef :: Prelude.Bool}
    -                  deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "name" OpDef'ArgDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" OpDef'ArgDef OpDef'ArgDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'name
    -              (\ x__ y__ -> x__{_OpDef'ArgDef'name = y__})
    -
    -type instance Data.ProtoLens.Field "description" OpDef'ArgDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "description" OpDef'ArgDef
    -         OpDef'ArgDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'description
    -              (\ x__ y__ -> x__{_OpDef'ArgDef'description = y__})
    -
    -type instance Data.ProtoLens.Field "type'" OpDef'ArgDef =
    -     Proto.Tensorflow.Core.Framework.Types.DataType
    -
    -instance Data.ProtoLens.HasField "type'" OpDef'ArgDef OpDef'ArgDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'type'
    -              (\ x__ y__ -> x__{_OpDef'ArgDef'type' = y__})
    -
    -type instance Data.ProtoLens.Field "typeAttr" OpDef'ArgDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "typeAttr" OpDef'ArgDef
    -         OpDef'ArgDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'typeAttr
    -              (\ x__ y__ -> x__{_OpDef'ArgDef'typeAttr = y__})
    -
    -type instance Data.ProtoLens.Field "numberAttr" OpDef'ArgDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "numberAttr" OpDef'ArgDef
    -         OpDef'ArgDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'numberAttr
    -              (\ x__ y__ -> x__{_OpDef'ArgDef'numberAttr = y__})
    -
    -type instance Data.ProtoLens.Field "typeListAttr" OpDef'ArgDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "typeListAttr" OpDef'ArgDef
    -         OpDef'ArgDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'typeListAttr
    -              (\ x__ y__ -> x__{_OpDef'ArgDef'typeListAttr = y__})
    -
    -type instance Data.ProtoLens.Field "isRef" OpDef'ArgDef =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "isRef" OpDef'ArgDef OpDef'ArgDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'ArgDef'isRef
    -              (\ x__ y__ -> x__{_OpDef'ArgDef'isRef = y__})
    -
    -instance Data.Default.Class.Default OpDef'ArgDef where
    -        def
    -          = OpDef'ArgDef{_OpDef'ArgDef'name = Data.ProtoLens.fieldDefault,
    -                         _OpDef'ArgDef'description = Data.ProtoLens.fieldDefault,
    -                         _OpDef'ArgDef'type' = Data.Default.Class.def,
    -                         _OpDef'ArgDef'typeAttr = Data.ProtoLens.fieldDefault,
    -                         _OpDef'ArgDef'numberAttr = Data.ProtoLens.fieldDefault,
    -                         _OpDef'ArgDef'typeListAttr = Data.ProtoLens.fieldDefault,
    -                         _OpDef'ArgDef'isRef = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message OpDef'ArgDef where
    -        descriptor
    -          = let name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -                description__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "description"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
    -                type'__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "type"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Types.DataType)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
    -                typeAttr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "type_attr"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional typeAttr)
    -                numberAttr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "number_attr"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numberAttr)
    -                typeListAttr__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "type_list_attr"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional typeListAttr)
    -                isRef__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "is_ref"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional isRef)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, description__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, type'__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, typeAttr__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, numberAttr__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, typeListAttr__field_descriptor),
    -                    (Data.ProtoLens.Tag 16, isRef__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("name", name__field_descriptor),
    -                    ("description", description__field_descriptor),
    -                    ("type", type'__field_descriptor),
    -                    ("type_attr", typeAttr__field_descriptor),
    -                    ("number_attr", numberAttr__field_descriptor),
    -                    ("type_list_attr", typeListAttr__field_descriptor),
    -                    ("is_ref", isRef__field_descriptor)])
    -
    -data OpDef'AttrDef = OpDef'AttrDef{_OpDef'AttrDef'name ::
    -                                   Data.Text.Text,
    -                                   _OpDef'AttrDef'type' :: Data.Text.Text,
    -                                   _OpDef'AttrDef'defaultValue ::
    -                                   Prelude.Maybe
    -                                     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue,
    -                                   _OpDef'AttrDef'description :: Data.Text.Text,
    -                                   _OpDef'AttrDef'hasMinimum :: Prelude.Bool,
    -                                   _OpDef'AttrDef'minimum :: Data.Int.Int64,
    -                                   _OpDef'AttrDef'allowedValues ::
    -                                   Prelude.Maybe
    -                                     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue}
    -                   deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "name" OpDef'AttrDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" OpDef'AttrDef OpDef'AttrDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'name
    -              (\ x__ y__ -> x__{_OpDef'AttrDef'name = y__})
    -
    -type instance Data.ProtoLens.Field "type'" OpDef'AttrDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "type'" OpDef'AttrDef
    -         OpDef'AttrDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'type'
    -              (\ x__ y__ -> x__{_OpDef'AttrDef'type' = y__})
    -
    -type instance Data.ProtoLens.Field "defaultValue" OpDef'AttrDef =
    -     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "defaultValue" OpDef'AttrDef
    -         OpDef'AttrDef where
    -        field _
    -          = (Prelude..) maybe'defaultValue
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'defaultValue" OpDef'AttrDef =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "maybe'defaultValue" OpDef'AttrDef
    -         OpDef'AttrDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'defaultValue
    -              (\ x__ y__ -> x__{_OpDef'AttrDef'defaultValue = y__})
    -
    -type instance Data.ProtoLens.Field "description" OpDef'AttrDef =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "description" OpDef'AttrDef
    -         OpDef'AttrDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'description
    -              (\ x__ y__ -> x__{_OpDef'AttrDef'description = y__})
    -
    -type instance Data.ProtoLens.Field "hasMinimum" OpDef'AttrDef =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "hasMinimum" OpDef'AttrDef
    -         OpDef'AttrDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'hasMinimum
    -              (\ x__ y__ -> x__{_OpDef'AttrDef'hasMinimum = y__})
    -
    -type instance Data.ProtoLens.Field "minimum" OpDef'AttrDef =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "minimum" OpDef'AttrDef
    -         OpDef'AttrDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'minimum
    -              (\ x__ y__ -> x__{_OpDef'AttrDef'minimum = y__})
    -
    -type instance Data.ProtoLens.Field "allowedValues" OpDef'AttrDef =
    -     Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "allowedValues" OpDef'AttrDef
    -         OpDef'AttrDef where
    -        field _
    -          = (Prelude..) maybe'allowedValues
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'allowedValues" OpDef'AttrDef =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.AttrValue.AttrValue
    -
    -instance Data.ProtoLens.HasField "maybe'allowedValues"
    -         OpDef'AttrDef OpDef'AttrDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDef'AttrDef'allowedValues
    -              (\ x__ y__ -> x__{_OpDef'AttrDef'allowedValues = y__})
    -
    -instance Data.Default.Class.Default OpDef'AttrDef where
    -        def
    -          = OpDef'AttrDef{_OpDef'AttrDef'name = Data.ProtoLens.fieldDefault,
    -                          _OpDef'AttrDef'type' = Data.ProtoLens.fieldDefault,
    -                          _OpDef'AttrDef'defaultValue = Prelude.Nothing,
    -                          _OpDef'AttrDef'description = Data.ProtoLens.fieldDefault,
    -                          _OpDef'AttrDef'hasMinimum = Data.ProtoLens.fieldDefault,
    -                          _OpDef'AttrDef'minimum = Data.ProtoLens.fieldDefault,
    -                          _OpDef'AttrDef'allowedValues = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message OpDef'AttrDef where
    -        descriptor
    -          = let name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -                type'__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "type"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional type')
    -                defaultValue__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "default_value"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    -                      (Data.ProtoLens.OptionalField maybe'defaultValue)
    -                description__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "description"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional description)
    -                hasMinimum__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "has_minimum"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional hasMinimum)
    -                minimum__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "minimum"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional minimum)
    -                allowedValues__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allowed_values"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.AttrValue.AttrValue)
    -                      (Data.ProtoLens.OptionalField maybe'allowedValues)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, name__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, type'__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, defaultValue__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, description__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, hasMinimum__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, minimum__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, allowedValues__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("name", name__field_descriptor),
    -                    ("type", type'__field_descriptor),
    -                    ("default_value", defaultValue__field_descriptor),
    -                    ("description", description__field_descriptor),
    -                    ("has_minimum", hasMinimum__field_descriptor),
    -                    ("minimum", minimum__field_descriptor),
    -                    ("allowed_values", allowedValues__field_descriptor)])
    -
    -data OpDeprecation = OpDeprecation{_OpDeprecation'version ::
    -                                   Data.Int.Int32,
    -                                   _OpDeprecation'explanation :: Data.Text.Text}
    -                   deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "version" OpDeprecation =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "version" OpDeprecation
    -         OpDeprecation where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDeprecation'version
    -              (\ x__ y__ -> x__{_OpDeprecation'version = y__})
    -
    -type instance Data.ProtoLens.Field "explanation" OpDeprecation =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "explanation" OpDeprecation
    -         OpDeprecation where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpDeprecation'explanation
    -              (\ x__ y__ -> x__{_OpDeprecation'explanation = y__})
    -
    -instance Data.Default.Class.Default OpDeprecation where
    -        def
    -          = OpDeprecation{_OpDeprecation'version =
    -                            Data.ProtoLens.fieldDefault,
    -                          _OpDeprecation'explanation = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message OpDeprecation where
    -        descriptor
    -          = let version__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "version"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional version)
    -                explanation__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "explanation"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional explanation)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, version__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, explanation__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("version", version__field_descriptor),
    -                    ("explanation", explanation__field_descriptor)])
    -
    -data OpList = OpList{_OpList'op :: [OpDef]}
    -            deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "op" OpList = [OpDef]
    -
    -instance Data.ProtoLens.HasField "op" OpList OpList where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OpList'op
    -              (\ x__ y__ -> x__{_OpList'op = y__})
    -
    -instance Data.Default.Class.Default OpList where
    -        def = OpList{_OpList'op = []}
    -
    -instance Data.ProtoLens.Message OpList where
    -        descriptor
    -          = let op__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "op"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor OpDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked op)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList [(Data.ProtoLens.Tag 1, op__field_descriptor)])
    -                (Data.Map.fromList [("op", op__field_descriptor)])
    -
    -allowedValues ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "allowedValues" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "allowedValues" msg)
    -                  (Data.ProtoLens.Field "allowedValues" msg')
    -allowedValues
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allowedValues")
    -
    -allowsUninitializedInput ::
    -                         forall msg msg' .
    -                           Data.ProtoLens.HasField "allowsUninitializedInput" msg msg' =>
    -                           Lens.Family2.Lens msg msg'
    -                             (Data.ProtoLens.Field "allowsUninitializedInput" msg)
    -                             (Data.ProtoLens.Field "allowsUninitializedInput" msg')
    -allowsUninitializedInput
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allowsUninitializedInput")
    -
    -attr ::
    -     forall msg msg' . Data.ProtoLens.HasField "attr" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "attr" msg)
    -         (Data.ProtoLens.Field "attr" msg')
    -attr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "attr")
    -
    -defaultValue ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "defaultValue" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "defaultValue" msg)
    -                 (Data.ProtoLens.Field "defaultValue" msg')
    -defaultValue
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "defaultValue")
    -
    -deprecation ::
    -            forall msg msg' . Data.ProtoLens.HasField "deprecation" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "deprecation" msg)
    -                (Data.ProtoLens.Field "deprecation" msg')
    -deprecation
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "deprecation")
    -
    -description ::
    -            forall msg msg' . Data.ProtoLens.HasField "description" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "description" msg)
    -                (Data.ProtoLens.Field "description" msg')
    -description
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "description")
    -
    -explanation ::
    -            forall msg msg' . Data.ProtoLens.HasField "explanation" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "explanation" msg)
    -                (Data.ProtoLens.Field "explanation" msg')
    -explanation
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "explanation")
    -
    -hasMinimum ::
    -           forall msg msg' . Data.ProtoLens.HasField "hasMinimum" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "hasMinimum" msg)
    -               (Data.ProtoLens.Field "hasMinimum" msg')
    -hasMinimum
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "hasMinimum")
    -
    -inputArg ::
    -         forall msg msg' . Data.ProtoLens.HasField "inputArg" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "inputArg" msg)
    -             (Data.ProtoLens.Field "inputArg" msg')
    -inputArg
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "inputArg")
    -
    -isAggregate ::
    -            forall msg msg' . Data.ProtoLens.HasField "isAggregate" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isAggregate" msg)
    -                (Data.ProtoLens.Field "isAggregate" msg')
    -isAggregate
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isAggregate")
    -
    -isCommutative ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "isCommutative" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "isCommutative" msg)
    -                  (Data.ProtoLens.Field "isCommutative" msg')
    -isCommutative
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "isCommutative")
    -
    -isRef ::
    -      forall msg msg' . Data.ProtoLens.HasField "isRef" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isRef" msg)
    -          (Data.ProtoLens.Field "isRef" msg')
    -isRef
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isRef")
    -
    -isStateful ::
    -           forall msg msg' . Data.ProtoLens.HasField "isStateful" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "isStateful" msg)
    -               (Data.ProtoLens.Field "isStateful" msg')
    -isStateful
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "isStateful")
    -
    -maybe'allowedValues ::
    -                    forall msg msg' .
    -                      Data.ProtoLens.HasField "maybe'allowedValues" msg msg' =>
    -                      Lens.Family2.Lens msg msg'
    -                        (Data.ProtoLens.Field "maybe'allowedValues" msg)
    -                        (Data.ProtoLens.Field "maybe'allowedValues" msg')
    -maybe'allowedValues
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'allowedValues")
    -
    -maybe'defaultValue ::
    -                   forall msg msg' .
    -                     Data.ProtoLens.HasField "maybe'defaultValue" msg msg' =>
    -                     Lens.Family2.Lens msg msg'
    -                       (Data.ProtoLens.Field "maybe'defaultValue" msg)
    -                       (Data.ProtoLens.Field "maybe'defaultValue" msg')
    -maybe'defaultValue
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'defaultValue")
    -
    -maybe'deprecation ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "maybe'deprecation" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "maybe'deprecation" msg)
    -                      (Data.ProtoLens.Field "maybe'deprecation" msg')
    -maybe'deprecation
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'deprecation")
    -
    -minimum ::
    -        forall msg msg' . Data.ProtoLens.HasField "minimum" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "minimum" msg)
    -            (Data.ProtoLens.Field "minimum" msg')
    -minimum
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "minimum")
    -
    -name ::
    -     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    -         (Data.ProtoLens.Field "name" msg')
    -name
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    -
    -numberAttr ::
    -           forall msg msg' . Data.ProtoLens.HasField "numberAttr" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "numberAttr" msg)
    -               (Data.ProtoLens.Field "numberAttr" msg')
    -numberAttr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "numberAttr")
    -
    -op ::
    -   forall msg msg' . Data.ProtoLens.HasField "op" msg msg' =>
    -     Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "op" msg)
    -       (Data.ProtoLens.Field "op" msg')
    -op
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "op")
    -
    -outputArg ::
    -          forall msg msg' . Data.ProtoLens.HasField "outputArg" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "outputArg" msg)
    -              (Data.ProtoLens.Field "outputArg" msg')
    -outputArg
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "outputArg")
    -
    -summary ::
    -        forall msg msg' . Data.ProtoLens.HasField "summary" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "summary" msg)
    -            (Data.ProtoLens.Field "summary" msg')
    -summary
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "summary")
    -
    -type' ::
    -      forall msg msg' . Data.ProtoLens.HasField "type'" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "type'" msg)
    -          (Data.ProtoLens.Field "type'" msg')
    -type'
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "type'")
    -
    -typeAttr ::
    -         forall msg msg' . Data.ProtoLens.HasField "typeAttr" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "typeAttr" msg)
    -             (Data.ProtoLens.Field "typeAttr" msg')
    -typeAttr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "typeAttr")
    -
    -typeListAttr ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "typeListAttr" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "typeListAttr" msg)
    -                 (Data.ProtoLens.Field "typeListAttr" msg')
    -typeListAttr
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "typeListAttr")
    -
    -version ::
    -        forall msg msg' . Data.ProtoLens.HasField "version" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "version" msg)
    -            (Data.ProtoLens.Field "version" msg')
    -version
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "version")
    -
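For orientation, a minimal sketch of how the proto-lens API deleted above is consumed. It assumes `def` from Data.Default.Class, `(&)` from Data.Function, and `(^.)`/`(.~)` from lens-family's Lens.Family2, matching the module's own reexports; the concrete field values are invented for illustration:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Default.Class (def)
    import Data.Function ((&))
    import Data.Int (Int32)
    import Lens.Family2 ((^.), (.~))
    import Proto.Tensorflow.Core.Framework.OpDef

    -- Start from the Default instance and set fields through the generated lenses.
    deprecated :: OpDeprecation
    deprecated = def & version .~ 12
                     & explanation .~ "Use the V2 kernel instead."

    -- Reading goes through the same lens; "version" is Int32 per its Field instance.
    deprecatedAt :: Int32
    deprecatedAt = deprecated ^. version

    -- Repeated fields such as OpList's "op" are ordinary lists of messages.
    emptyOpList :: OpList
    emptyOpList = def & op .~ []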
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-ResourceHandle.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-ResourceHandle.html
deleted file mode 100644
index 0176f8d..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-ResourceHandle.html
+++ /dev/null
@@ -1,182 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/ResourceHandle.hs
-{- This file was auto-generated from tensorflow/core/framework/resource_handle.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.ResourceHandle where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -
    -data ResourceHandle = ResourceHandle{_ResourceHandle'device ::
    -                                     Data.Text.Text,
    -                                     _ResourceHandle'container :: Data.Text.Text,
    -                                     _ResourceHandle'name :: Data.Text.Text,
    -                                     _ResourceHandle'hashCode :: Data.Word.Word64,
    -                                     _ResourceHandle'maybeTypeName :: Data.Text.Text}
    -                    deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "device" ResourceHandle =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "device" ResourceHandle
    -         ResourceHandle where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ResourceHandle'device
    -              (\ x__ y__ -> x__{_ResourceHandle'device = y__})
    -
    -type instance Data.ProtoLens.Field "container" ResourceHandle =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "container" ResourceHandle
    -         ResourceHandle where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ResourceHandle'container
    -              (\ x__ y__ -> x__{_ResourceHandle'container = y__})
    -
    -type instance Data.ProtoLens.Field "name" ResourceHandle =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" ResourceHandle
    -         ResourceHandle where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ResourceHandle'name
    -              (\ x__ y__ -> x__{_ResourceHandle'name = y__})
    -
    -type instance Data.ProtoLens.Field "hashCode" ResourceHandle =
    -     Data.Word.Word64
    -
    -instance Data.ProtoLens.HasField "hashCode" ResourceHandle
    -         ResourceHandle where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ResourceHandle'hashCode
    -              (\ x__ y__ -> x__{_ResourceHandle'hashCode = y__})
    -
    -type instance Data.ProtoLens.Field "maybeTypeName" ResourceHandle =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "maybeTypeName" ResourceHandle
    -         ResourceHandle where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ResourceHandle'maybeTypeName
    -              (\ x__ y__ -> x__{_ResourceHandle'maybeTypeName = y__})
    -
    -instance Data.Default.Class.Default ResourceHandle where
    -        def
    -          = ResourceHandle{_ResourceHandle'device =
    -                             Data.ProtoLens.fieldDefault,
    -                           _ResourceHandle'container = Data.ProtoLens.fieldDefault,
    -                           _ResourceHandle'name = Data.ProtoLens.fieldDefault,
    -                           _ResourceHandle'hashCode = Data.ProtoLens.fieldDefault,
    -                           _ResourceHandle'maybeTypeName = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message ResourceHandle where
    -        descriptor
    -          = let device__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "device"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    -                container__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "container"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional container)
    -                name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -                hashCode__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "hash_code"
    -                      (Data.ProtoLens.UInt64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional hashCode)
    -                maybeTypeName__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "maybe_type_name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional maybeTypeName)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, device__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, container__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, name__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, hashCode__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, maybeTypeName__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("device", device__field_descriptor),
    -                    ("container", container__field_descriptor),
    -                    ("name", name__field_descriptor),
    -                    ("hash_code", hashCode__field_descriptor),
    -                    ("maybe_type_name", maybeTypeName__field_descriptor)])
    -
    -container ::
    -          forall msg msg' . Data.ProtoLens.HasField "container" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "container" msg)
    -              (Data.ProtoLens.Field "container" msg')
    -container
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "container")
    -
    -device ::
    -       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    -           (Data.ProtoLens.Field "device" msg')
    -device
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    -
    -hashCode ::
    -         forall msg msg' . Data.ProtoLens.HasField "hashCode" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "hashCode" msg)
    -             (Data.ProtoLens.Field "hashCode" msg')
    -hashCode
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "hashCode")
    -
    -maybeTypeName ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "maybeTypeName" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "maybeTypeName" msg)
    -                  (Data.ProtoLens.Field "maybeTypeName" msg')
    -maybeTypeName
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybeTypeName")
    -
    -name ::
    -     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    -         (Data.ProtoLens.Field "name" msg')
    -name
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    -
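Similarly for the ResourceHandle module deleted above: every field is a scalar, so only plain lenses are generated (no maybe' variants). A sketch under the same assumptions as before, with invented values:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Default.Class (def)
    import Data.Function ((&))
    import Lens.Family2 ((^.), (.~))
    import qualified Data.Text as Text
    import Proto.Tensorflow.Core.Framework.ResourceHandle

    -- Unset proto3 scalars keep their zero defaults ("" / 0), so only the
    -- fields of interest need to be set.
    handle :: ResourceHandle
    handle = def & device .~ "/job:localhost/replica:0/task:0/cpu:0"
                 & container .~ "localhost"
                 & name .~ "my_variable"

    handleName :: Text.Text
    handleName = handle ^. name   -- ==> "my_variable"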
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-StepStats.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-StepStats.html
deleted file mode 100644
index 988317e..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-StepStats.html
+++ /dev/null
@@ -1,653 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/StepStats.hs
-{- This file was auto-generated from tensorflow/core/framework/step_stats.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.StepStats where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified
    -       Proto.Tensorflow.Core.Framework.AllocationDescription
    -import qualified Proto.Tensorflow.Core.Framework.TensorDescription
    -
    -data AllocatorMemoryUsed = AllocatorMemoryUsed{_AllocatorMemoryUsed'allocatorName
    -                                               :: Data.Text.Text,
    -                                               _AllocatorMemoryUsed'totalBytes :: Data.Int.Int64,
    -                                               _AllocatorMemoryUsed'peakBytes :: Data.Int.Int64}
    -                         deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "allocatorName" AllocatorMemoryUsed =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "allocatorName"
    -         AllocatorMemoryUsed AllocatorMemoryUsed where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'allocatorName
    -              (\ x__ y__ -> x__{_AllocatorMemoryUsed'allocatorName = y__})
    -
    -type instance Data.ProtoLens.Field "totalBytes" AllocatorMemoryUsed
    -     = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "totalBytes" AllocatorMemoryUsed
    -         AllocatorMemoryUsed where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'totalBytes
    -              (\ x__ y__ -> x__{_AllocatorMemoryUsed'totalBytes = y__})
    -
    -type instance Data.ProtoLens.Field "peakBytes" AllocatorMemoryUsed
    -     = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "peakBytes" AllocatorMemoryUsed
    -         AllocatorMemoryUsed where
    -        field _
    -          = Lens.Family2.Unchecked.lens _AllocatorMemoryUsed'peakBytes
    -              (\ x__ y__ -> x__{_AllocatorMemoryUsed'peakBytes = y__})
    -
    -instance Data.Default.Class.Default AllocatorMemoryUsed where
    -        def
    -          = AllocatorMemoryUsed{_AllocatorMemoryUsed'allocatorName =
    -                                  Data.ProtoLens.fieldDefault,
    -                                _AllocatorMemoryUsed'totalBytes = Data.ProtoLens.fieldDefault,
    -                                _AllocatorMemoryUsed'peakBytes = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message AllocatorMemoryUsed where
    -        descriptor
    -          = let allocatorName__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allocator_name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorName)
    -                totalBytes__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "total_bytes"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional totalBytes)
    -                peakBytes__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "peak_bytes"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional peakBytes)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, allocatorName__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, totalBytes__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, peakBytes__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("allocator_name", allocatorName__field_descriptor),
    -                    ("total_bytes", totalBytes__field_descriptor),
    -                    ("peak_bytes", peakBytes__field_descriptor)])
    -
    -data DeviceStepStats = DeviceStepStats{_DeviceStepStats'device ::
    -                                       Data.Text.Text,
    -                                       _DeviceStepStats'nodeStats :: [NodeExecStats]}
    -                     deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "device" DeviceStepStats =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "device" DeviceStepStats
    -         DeviceStepStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _DeviceStepStats'device
    -              (\ x__ y__ -> x__{_DeviceStepStats'device = y__})
    -
    -type instance Data.ProtoLens.Field "nodeStats" DeviceStepStats =
    -     [NodeExecStats]
    -
    -instance Data.ProtoLens.HasField "nodeStats" DeviceStepStats
    -         DeviceStepStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _DeviceStepStats'nodeStats
    -              (\ x__ y__ -> x__{_DeviceStepStats'nodeStats = y__})
    -
    -instance Data.Default.Class.Default DeviceStepStats where
    -        def
    -          = DeviceStepStats{_DeviceStepStats'device =
    -                              Data.ProtoLens.fieldDefault,
    -                            _DeviceStepStats'nodeStats = []}
    -
    -instance Data.ProtoLens.Message DeviceStepStats where
    -        descriptor
    -          = let device__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "device"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional device)
    -                nodeStats__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "node_stats"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor NodeExecStats)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked nodeStats)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, device__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, nodeStats__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("device", device__field_descriptor),
    -                    ("node_stats", nodeStats__field_descriptor)])
    -
    -data NodeExecStats = NodeExecStats{_NodeExecStats'nodeName ::
    -                                   Data.Text.Text,
    -                                   _NodeExecStats'allStartMicros :: Data.Int.Int64,
    -                                   _NodeExecStats'opStartRelMicros :: Data.Int.Int64,
    -                                   _NodeExecStats'opEndRelMicros :: Data.Int.Int64,
    -                                   _NodeExecStats'allEndRelMicros :: Data.Int.Int64,
    -                                   _NodeExecStats'memory :: [AllocatorMemoryUsed],
    -                                   _NodeExecStats'output :: [NodeOutput],
    -                                   _NodeExecStats'timelineLabel :: Data.Text.Text,
    -                                   _NodeExecStats'scheduledMicros :: Data.Int.Int64,
    -                                   _NodeExecStats'threadId :: Data.Word.Word32,
    -                                   _NodeExecStats'referencedTensor ::
    -                                   [Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription]}
    -                   deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "nodeName" NodeExecStats =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "nodeName" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'nodeName
    -              (\ x__ y__ -> x__{_NodeExecStats'nodeName = y__})
    -
    -type instance Data.ProtoLens.Field "allStartMicros" NodeExecStats =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "allStartMicros" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'allStartMicros
    -              (\ x__ y__ -> x__{_NodeExecStats'allStartMicros = y__})
    -
    -type instance Data.ProtoLens.Field "opStartRelMicros" NodeExecStats
    -     = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "opStartRelMicros" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'opStartRelMicros
    -              (\ x__ y__ -> x__{_NodeExecStats'opStartRelMicros = y__})
    -
    -type instance Data.ProtoLens.Field "opEndRelMicros" NodeExecStats =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "opEndRelMicros" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'opEndRelMicros
    -              (\ x__ y__ -> x__{_NodeExecStats'opEndRelMicros = y__})
    -
    -type instance Data.ProtoLens.Field "allEndRelMicros" NodeExecStats
    -     = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "allEndRelMicros" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'allEndRelMicros
    -              (\ x__ y__ -> x__{_NodeExecStats'allEndRelMicros = y__})
    -
    -type instance Data.ProtoLens.Field "memory" NodeExecStats =
    -     [AllocatorMemoryUsed]
    -
    -instance Data.ProtoLens.HasField "memory" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'memory
    -              (\ x__ y__ -> x__{_NodeExecStats'memory = y__})
    -
    -type instance Data.ProtoLens.Field "output" NodeExecStats =
    -     [NodeOutput]
    -
    -instance Data.ProtoLens.HasField "output" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'output
    -              (\ x__ y__ -> x__{_NodeExecStats'output = y__})
    -
    -type instance Data.ProtoLens.Field "timelineLabel" NodeExecStats =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "timelineLabel" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'timelineLabel
    -              (\ x__ y__ -> x__{_NodeExecStats'timelineLabel = y__})
    -
    -type instance Data.ProtoLens.Field "scheduledMicros" NodeExecStats
    -     = Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "scheduledMicros" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'scheduledMicros
    -              (\ x__ y__ -> x__{_NodeExecStats'scheduledMicros = y__})
    -
    -type instance Data.ProtoLens.Field "threadId" NodeExecStats =
    -     Data.Word.Word32
    -
    -instance Data.ProtoLens.HasField "threadId" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'threadId
    -              (\ x__ y__ -> x__{_NodeExecStats'threadId = y__})
    -
    -type instance Data.ProtoLens.Field "referencedTensor" NodeExecStats
    -     =
    -     [Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription]
    -
    -instance Data.ProtoLens.HasField "referencedTensor" NodeExecStats
    -         NodeExecStats where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeExecStats'referencedTensor
    -              (\ x__ y__ -> x__{_NodeExecStats'referencedTensor = y__})
    -
    -instance Data.Default.Class.Default NodeExecStats where
    -        def
    -          = NodeExecStats{_NodeExecStats'nodeName =
    -                            Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'allStartMicros = Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'opStartRelMicros = Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'opEndRelMicros = Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'allEndRelMicros = Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'memory = [], _NodeExecStats'output = [],
    -                          _NodeExecStats'timelineLabel = Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'scheduledMicros = Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'threadId = Data.ProtoLens.fieldDefault,
    -                          _NodeExecStats'referencedTensor = []}
    -
    -instance Data.ProtoLens.Message NodeExecStats where
    -        descriptor
    -          = let nodeName__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "node_name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional nodeName)
    -                allStartMicros__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "all_start_micros"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allStartMicros)
    -                opStartRelMicros__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "op_start_rel_micros"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         opStartRelMicros)
    -                opEndRelMicros__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "op_end_rel_micros"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional opEndRelMicros)
    -                allEndRelMicros__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "all_end_rel_micros"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allEndRelMicros)
    -                memory__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "memory"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor AllocatorMemoryUsed)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked memory)
    -                output__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "output"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor NodeOutput)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked output)
    -                timelineLabel__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "timeline_label"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timelineLabel)
    -                scheduledMicros__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "scheduled_micros"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional scheduledMicros)
    -                threadId__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "thread_id"
    -                      (Data.ProtoLens.UInt32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Word.Word32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional threadId)
    -                referencedTensor__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "referenced_tensor"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    -                         referencedTensor)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, nodeName__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, allStartMicros__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, opStartRelMicros__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, opEndRelMicros__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, allEndRelMicros__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, memory__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, output__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, timelineLabel__field_descriptor),
    -                    (Data.ProtoLens.Tag 9, scheduledMicros__field_descriptor),
    -                    (Data.ProtoLens.Tag 10, threadId__field_descriptor),
    -                    (Data.ProtoLens.Tag 11, referencedTensor__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("node_name", nodeName__field_descriptor),
    -                    ("all_start_micros", allStartMicros__field_descriptor),
    -                    ("op_start_rel_micros", opStartRelMicros__field_descriptor),
    -                    ("op_end_rel_micros", opEndRelMicros__field_descriptor),
    -                    ("all_end_rel_micros", allEndRelMicros__field_descriptor),
    -                    ("memory", memory__field_descriptor),
    -                    ("output", output__field_descriptor),
    -                    ("timeline_label", timelineLabel__field_descriptor),
    -                    ("scheduled_micros", scheduledMicros__field_descriptor),
    -                    ("thread_id", threadId__field_descriptor),
    -                    ("referenced_tensor", referencedTensor__field_descriptor)])
    -
    -data NodeOutput = NodeOutput{_NodeOutput'slot :: Data.Int.Int32,
    -                             _NodeOutput'tensorDescription ::
    -                             Prelude.Maybe
    -                               Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription}
    -                deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "slot" NodeOutput =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "slot" NodeOutput NodeOutput where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeOutput'slot
    -              (\ x__ y__ -> x__{_NodeOutput'slot = y__})
    -
    -type instance Data.ProtoLens.Field "tensorDescription" NodeOutput =
    -     Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription
    -
    -instance Data.ProtoLens.HasField "tensorDescription" NodeOutput
    -         NodeOutput where
    -        field _
    -          = (Prelude..) maybe'tensorDescription
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'tensorDescription" NodeOutput =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription
    -
    -instance Data.ProtoLens.HasField "maybe'tensorDescription"
    -         NodeOutput NodeOutput where
    -        field _
    -          = Lens.Family2.Unchecked.lens _NodeOutput'tensorDescription
    -              (\ x__ y__ -> x__{_NodeOutput'tensorDescription = y__})
    -
    -instance Data.Default.Class.Default NodeOutput where
    -        def
    -          = NodeOutput{_NodeOutput'slot = Data.ProtoLens.fieldDefault,
    -                       _NodeOutput'tensorDescription = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message NodeOutput where
    -        descriptor
    -          = let slot__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "slot"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional slot)
    -                tensorDescription__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "tensor_description"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.TensorDescription.TensorDescription)
    -                      (Data.ProtoLens.OptionalField maybe'tensorDescription)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, slot__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, tensorDescription__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("slot", slot__field_descriptor),
    -                    ("tensor_description", tensorDescription__field_descriptor)])
    -
    -data StepStats = StepStats{_StepStats'devStats ::
    -                           [DeviceStepStats]}
    -               deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "devStats" StepStats =
    -     [DeviceStepStats]
    -
    -instance Data.ProtoLens.HasField "devStats" StepStats StepStats
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _StepStats'devStats
    -              (\ x__ y__ -> x__{_StepStats'devStats = y__})
    -
    -instance Data.Default.Class.Default StepStats where
    -        def = StepStats{_StepStats'devStats = []}
    -
    -instance Data.ProtoLens.Message StepStats where
    -        descriptor
    -          = let devStats__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "dev_stats"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor DeviceStepStats)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked devStats)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, devStats__field_descriptor)])
    -                (Data.Map.fromList [("dev_stats", devStats__field_descriptor)])
    -
    -allEndRelMicros ::
    -                forall msg msg' .
    -                  Data.ProtoLens.HasField "allEndRelMicros" msg msg' =>
    -                  Lens.Family2.Lens msg msg'
    -                    (Data.ProtoLens.Field "allEndRelMicros" msg)
    -                    (Data.ProtoLens.Field "allEndRelMicros" msg')
    -allEndRelMicros
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allEndRelMicros")
    -
    -allStartMicros ::
    -               forall msg msg' .
    -                 Data.ProtoLens.HasField "allStartMicros" msg msg' =>
    -                 Lens.Family2.Lens msg msg'
    -                   (Data.ProtoLens.Field "allStartMicros" msg)
    -                   (Data.ProtoLens.Field "allStartMicros" msg')
    -allStartMicros
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allStartMicros")
    -
    -allocatorName ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "allocatorName" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "allocatorName" msg)
    -                  (Data.ProtoLens.Field "allocatorName" msg')
    -allocatorName
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allocatorName")
    -
    -devStats ::
    -         forall msg msg' . Data.ProtoLens.HasField "devStats" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "devStats" msg)
    -             (Data.ProtoLens.Field "devStats" msg')
    -devStats
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "devStats")
    -
    -device ::
    -       forall msg msg' . Data.ProtoLens.HasField "device" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "device" msg)
    -           (Data.ProtoLens.Field "device" msg')
    -device
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "device")
    -
    -maybe'tensorDescription ::
    -                        forall msg msg' .
    -                          Data.ProtoLens.HasField "maybe'tensorDescription" msg msg' =>
    -                          Lens.Family2.Lens msg msg'
    -                            (Data.ProtoLens.Field "maybe'tensorDescription" msg)
    -                            (Data.ProtoLens.Field "maybe'tensorDescription" msg')
    -maybe'tensorDescription
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'tensorDescription")
    -
    -memory ::
    -       forall msg msg' . Data.ProtoLens.HasField "memory" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "memory" msg)
    -           (Data.ProtoLens.Field "memory" msg')
    -memory
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "memory")
    -
    -nodeName ::
    -         forall msg msg' . Data.ProtoLens.HasField "nodeName" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeName" msg)
    -             (Data.ProtoLens.Field "nodeName" msg')
    -nodeName
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeName")
    -
    -nodeStats ::
    -          forall msg msg' . Data.ProtoLens.HasField "nodeStats" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeStats" msg)
    -              (Data.ProtoLens.Field "nodeStats" msg')
    -nodeStats
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeStats")
    -
    -opEndRelMicros ::
    -               forall msg msg' .
    -                 Data.ProtoLens.HasField "opEndRelMicros" msg msg' =>
    -                 Lens.Family2.Lens msg msg'
    -                   (Data.ProtoLens.Field "opEndRelMicros" msg)
    -                   (Data.ProtoLens.Field "opEndRelMicros" msg')
    -opEndRelMicros
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "opEndRelMicros")
    -
    -opStartRelMicros ::
    -                 forall msg msg' .
    -                   Data.ProtoLens.HasField "opStartRelMicros" msg msg' =>
    -                   Lens.Family2.Lens msg msg'
    -                     (Data.ProtoLens.Field "opStartRelMicros" msg)
    -                     (Data.ProtoLens.Field "opStartRelMicros" msg')
    -opStartRelMicros
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "opStartRelMicros")
    -
    -output ::
    -       forall msg msg' . Data.ProtoLens.HasField "output" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "output" msg)
    -           (Data.ProtoLens.Field "output" msg')
    -output
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "output")
    -
    -peakBytes ::
    -          forall msg msg' . Data.ProtoLens.HasField "peakBytes" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "peakBytes" msg)
    -              (Data.ProtoLens.Field "peakBytes" msg')
    -peakBytes
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "peakBytes")
    -
    -referencedTensor ::
    -                 forall msg msg' .
    -                   Data.ProtoLens.HasField "referencedTensor" msg msg' =>
    -                   Lens.Family2.Lens msg msg'
    -                     (Data.ProtoLens.Field "referencedTensor" msg)
    -                     (Data.ProtoLens.Field "referencedTensor" msg')
    -referencedTensor
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "referencedTensor")
    -
    -scheduledMicros ::
    -                forall msg msg' .
    -                  Data.ProtoLens.HasField "scheduledMicros" msg msg' =>
    -                  Lens.Family2.Lens msg msg'
    -                    (Data.ProtoLens.Field "scheduledMicros" msg)
    -                    (Data.ProtoLens.Field "scheduledMicros" msg')
    -scheduledMicros
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "scheduledMicros")
    -
    -slot ::
    -     forall msg msg' . Data.ProtoLens.HasField "slot" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "slot" msg)
    -         (Data.ProtoLens.Field "slot" msg')
    -slot
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "slot")
    -
    -tensorDescription ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "tensorDescription" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "tensorDescription" msg)
    -                      (Data.ProtoLens.Field "tensorDescription" msg')
    -tensorDescription
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "tensorDescription")
    -
    -threadId ::
    -         forall msg msg' . Data.ProtoLens.HasField "threadId" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "threadId" msg)
    -             (Data.ProtoLens.Field "threadId" msg')
    -threadId
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "threadId")
    -
    -timelineLabel ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "timelineLabel" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "timelineLabel" msg)
    -                  (Data.ProtoLens.Field "timelineLabel" msg')
    -timelineLabel
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "timelineLabel")
    -
    -totalBytes ::
    -           forall msg msg' . Data.ProtoLens.HasField "totalBytes" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "totalBytes" msg)
    -               (Data.ProtoLens.Field "totalBytes" msg')
    -totalBytes
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "totalBytes")
    -
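And for the StepStats module deleted above: repeated message fields are plain Haskell lists, and optional submessages come in two flavors, the unprimed lens (e.g. tensorDescription), which substitutes the Default value for a missing message, and the maybe' lens, which exposes the underlying Maybe. A sketch under the same assumptions, with invented node names and timings:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Default.Class (def)
    import Data.Function ((&))
    import Lens.Family2 ((^.), (.~))
    import Proto.Tensorflow.Core.Framework.StepStats

    -- NodeOutput's tensor_description is optional: maybe'tensorDescription
    -- reads and writes the Maybe directly, while tensorDescription would
    -- return def when it is Nothing.
    outSlot :: NodeOutput
    outSlot = def & slot .~ 0

    matMulStats :: NodeExecStats
    matMulStats = def & nodeName .~ "MatMul"
                      & allStartMicros .~ 10
                      & opEndRelMicros .~ 42
                      & output .~ [outSlot]

    -- Nesting follows the proto structure: StepStats -> DeviceStepStats
    -- -> NodeExecStats, all via repeated-field lenses over lists.
    stats :: StepStats
    stats = def & devStats .~
              [def & device .~ "/cpu:0" & nodeStats .~ [matMulStats]]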
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Tensor.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Tensor.html
deleted file mode 100644
index a61dacd..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Tensor.html
+++ /dev/null
@@ -1,448 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Tensor.hs
-{- This file was auto-generated from tensorflow/core/framework/tensor.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.Tensor where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.ResourceHandle
    -import qualified Proto.Tensorflow.Core.Framework.TensorShape
    -import qualified Proto.Tensorflow.Core.Framework.Types
    -
    -data TensorProto = TensorProto{_TensorProto'dtype ::
    -                               Proto.Tensorflow.Core.Framework.Types.DataType,
    -                               _TensorProto'tensorShape ::
    -                               Prelude.Maybe
    -                                 Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    -                               _TensorProto'versionNumber :: Data.Int.Int32,
    -                               _TensorProto'tensorContent :: Data.ByteString.ByteString,
    -                               _TensorProto'halfVal :: [Data.Int.Int32],
    -                               _TensorProto'floatVal :: [Prelude.Float],
    -                               _TensorProto'doubleVal :: [Prelude.Double],
    -                               _TensorProto'intVal :: [Data.Int.Int32],
    -                               _TensorProto'stringVal :: [Data.ByteString.ByteString],
    -                               _TensorProto'scomplexVal :: [Prelude.Float],
    -                               _TensorProto'int64Val :: [Data.Int.Int64],
    -                               _TensorProto'boolVal :: [Prelude.Bool],
    -                               _TensorProto'dcomplexVal :: [Prelude.Double],
    -                               _TensorProto'resourceHandleVal ::
    -                               [Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle]}
    -                 deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "dtype" TensorProto =
    -     Proto.Tensorflow.Core.Framework.Types.DataType
    -
    -instance Data.ProtoLens.HasField "dtype" TensorProto TensorProto
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'dtype
    -              (\ x__ y__ -> x__{_TensorProto'dtype = y__})
    -
    -type instance Data.ProtoLens.Field "tensorShape" TensorProto =
    -     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "tensorShape" TensorProto
    -         TensorProto where
    -        field _
    -          = (Prelude..) maybe'tensorShape
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'tensorShape" TensorProto
    -     =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "maybe'tensorShape" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'tensorShape
    -              (\ x__ y__ -> x__{_TensorProto'tensorShape = y__})
    -
    -type instance Data.ProtoLens.Field "versionNumber" TensorProto =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "versionNumber" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'versionNumber
    -              (\ x__ y__ -> x__{_TensorProto'versionNumber = y__})
    -
    -type instance Data.ProtoLens.Field "tensorContent" TensorProto =
    -     Data.ByteString.ByteString
    -
    -instance Data.ProtoLens.HasField "tensorContent" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'tensorContent
    -              (\ x__ y__ -> x__{_TensorProto'tensorContent = y__})
    -
    -type instance Data.ProtoLens.Field "halfVal" TensorProto =
    -     [Data.Int.Int32]
    -
    -instance Data.ProtoLens.HasField "halfVal" TensorProto TensorProto
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'halfVal
    -              (\ x__ y__ -> x__{_TensorProto'halfVal = y__})
    -
    -type instance Data.ProtoLens.Field "floatVal" TensorProto =
    -     [Prelude.Float]
    -
    -instance Data.ProtoLens.HasField "floatVal" TensorProto TensorProto
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'floatVal
    -              (\ x__ y__ -> x__{_TensorProto'floatVal = y__})
    -
    -type instance Data.ProtoLens.Field "doubleVal" TensorProto =
    -     [Prelude.Double]
    -
    -instance Data.ProtoLens.HasField "doubleVal" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'doubleVal
    -              (\ x__ y__ -> x__{_TensorProto'doubleVal = y__})
    -
    -type instance Data.ProtoLens.Field "intVal" TensorProto =
    -     [Data.Int.Int32]
    -
    -instance Data.ProtoLens.HasField "intVal" TensorProto TensorProto
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'intVal
    -              (\ x__ y__ -> x__{_TensorProto'intVal = y__})
    -
    -type instance Data.ProtoLens.Field "stringVal" TensorProto =
    -     [Data.ByteString.ByteString]
    -
    -instance Data.ProtoLens.HasField "stringVal" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'stringVal
    -              (\ x__ y__ -> x__{_TensorProto'stringVal = y__})
    -
    -type instance Data.ProtoLens.Field "scomplexVal" TensorProto =
    -     [Prelude.Float]
    -
    -instance Data.ProtoLens.HasField "scomplexVal" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'scomplexVal
    -              (\ x__ y__ -> x__{_TensorProto'scomplexVal = y__})
    -
    -type instance Data.ProtoLens.Field "int64Val" TensorProto =
    -     [Data.Int.Int64]
    -
    -instance Data.ProtoLens.HasField "int64Val" TensorProto TensorProto
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'int64Val
    -              (\ x__ y__ -> x__{_TensorProto'int64Val = y__})
    -
    -type instance Data.ProtoLens.Field "boolVal" TensorProto =
    -     [Prelude.Bool]
    -
    -instance Data.ProtoLens.HasField "boolVal" TensorProto TensorProto
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'boolVal
    -              (\ x__ y__ -> x__{_TensorProto'boolVal = y__})
    -
    -type instance Data.ProtoLens.Field "dcomplexVal" TensorProto =
    -     [Prelude.Double]
    -
    -instance Data.ProtoLens.HasField "dcomplexVal" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'dcomplexVal
    -              (\ x__ y__ -> x__{_TensorProto'dcomplexVal = y__})
    -
    -type instance Data.ProtoLens.Field "resourceHandleVal" TensorProto
    -     = [Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle]
    -
    -instance Data.ProtoLens.HasField "resourceHandleVal" TensorProto
    -         TensorProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorProto'resourceHandleVal
    -              (\ x__ y__ -> x__{_TensorProto'resourceHandleVal = y__})
    -
    -instance Data.Default.Class.Default TensorProto where
    -        def
    -          = TensorProto{_TensorProto'dtype = Data.Default.Class.def,
    -                        _TensorProto'tensorShape = Prelude.Nothing,
    -                        _TensorProto'versionNumber = Data.ProtoLens.fieldDefault,
    -                        _TensorProto'tensorContent = Data.ProtoLens.fieldDefault,
    -                        _TensorProto'halfVal = [], _TensorProto'floatVal = [],
    -                        _TensorProto'doubleVal = [], _TensorProto'intVal = [],
    -                        _TensorProto'stringVal = [], _TensorProto'scomplexVal = [],
    -                        _TensorProto'int64Val = [], _TensorProto'boolVal = [],
    -                        _TensorProto'dcomplexVal = [], _TensorProto'resourceHandleVal = []}
    -
    -instance Data.ProtoLens.Message TensorProto where
    -        descriptor
    -          = let dtype__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "dtype"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Types.DataType)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
    -                tensorShape__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "tensor_shape"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    -                      (Data.ProtoLens.OptionalField maybe'tensorShape)
    -                versionNumber__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "version_number"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional versionNumber)
    -                tensorContent__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "tensor_content"
    -                      (Data.ProtoLens.BytesField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional tensorContent)
    -                halfVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "half_val"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed halfVal)
    -                floatVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "float_val"
    -                      (Data.ProtoLens.FloatField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed floatVal)
    -                doubleVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "double_val"
    -                      (Data.ProtoLens.DoubleField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed doubleVal)
    -                intVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "int_val"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed intVal)
    -                stringVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "string_val"
    -                      (Data.ProtoLens.BytesField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.ByteString.ByteString)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked stringVal)
    -                scomplexVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "scomplex_val"
    -                      (Data.ProtoLens.FloatField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Float)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed scomplexVal)
    -                int64Val__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "int64_val"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed int64Val)
    -                boolVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "bool_val"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed boolVal)
    -                dcomplexVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "dcomplex_val"
    -                      (Data.ProtoLens.DoubleField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Packed dcomplexVal)
    -                resourceHandleVal__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "resource_handle_val"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.ResourceHandle.ResourceHandle)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    -                         resourceHandleVal)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, tensorShape__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, versionNumber__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, tensorContent__field_descriptor),
    -                    (Data.ProtoLens.Tag 13, halfVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, floatVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, doubleVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, intVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, stringVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 9, scomplexVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 10, int64Val__field_descriptor),
    -                    (Data.ProtoLens.Tag 11, boolVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 12, dcomplexVal__field_descriptor),
    -                    (Data.ProtoLens.Tag 14, resourceHandleVal__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("dtype", dtype__field_descriptor),
    -                    ("tensor_shape", tensorShape__field_descriptor),
    -                    ("version_number", versionNumber__field_descriptor),
    -                    ("tensor_content", tensorContent__field_descriptor),
    -                    ("half_val", halfVal__field_descriptor),
    -                    ("float_val", floatVal__field_descriptor),
    -                    ("double_val", doubleVal__field_descriptor),
    -                    ("int_val", intVal__field_descriptor),
    -                    ("string_val", stringVal__field_descriptor),
    -                    ("scomplex_val", scomplexVal__field_descriptor),
    -                    ("int64_val", int64Val__field_descriptor),
    -                    ("bool_val", boolVal__field_descriptor),
    -                    ("dcomplex_val", dcomplexVal__field_descriptor),
    -                    ("resource_handle_val", resourceHandleVal__field_descriptor)])
    -
    -boolVal ::
    -        forall msg msg' . Data.ProtoLens.HasField "boolVal" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "boolVal" msg)
    -            (Data.ProtoLens.Field "boolVal" msg')
    -boolVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "boolVal")
    -
    -dcomplexVal ::
    -            forall msg msg' . Data.ProtoLens.HasField "dcomplexVal" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dcomplexVal" msg)
    -                (Data.ProtoLens.Field "dcomplexVal" msg')
    -dcomplexVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dcomplexVal")
    -
    -doubleVal ::
    -          forall msg msg' . Data.ProtoLens.HasField "doubleVal" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "doubleVal" msg)
    -              (Data.ProtoLens.Field "doubleVal" msg')
    -doubleVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "doubleVal")
    -
    -dtype ::
    -      forall msg msg' . Data.ProtoLens.HasField "dtype" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dtype" msg)
    -          (Data.ProtoLens.Field "dtype" msg')
    -dtype
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dtype")
    -
    -floatVal ::
    -         forall msg msg' . Data.ProtoLens.HasField "floatVal" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "floatVal" msg)
    -             (Data.ProtoLens.Field "floatVal" msg')
    -floatVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "floatVal")
    -
    -halfVal ::
    -        forall msg msg' . Data.ProtoLens.HasField "halfVal" msg msg' =>
    -          Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "halfVal" msg)
    -            (Data.ProtoLens.Field "halfVal" msg')
    -halfVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "halfVal")
    -
    -int64Val ::
    -         forall msg msg' . Data.ProtoLens.HasField "int64Val" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "int64Val" msg)
    -             (Data.ProtoLens.Field "int64Val" msg')
    -int64Val
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "int64Val")
    -
    -intVal ::
    -       forall msg msg' . Data.ProtoLens.HasField "intVal" msg msg' =>
    -         Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "intVal" msg)
    -           (Data.ProtoLens.Field "intVal" msg')
    -intVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "intVal")
    -
    -maybe'tensorShape ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "maybe'tensorShape" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "maybe'tensorShape" msg)
    -                      (Data.ProtoLens.Field "maybe'tensorShape" msg')
    -maybe'tensorShape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'tensorShape")
    -
    -resourceHandleVal ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "resourceHandleVal" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "resourceHandleVal" msg)
    -                      (Data.ProtoLens.Field "resourceHandleVal" msg')
    -resourceHandleVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "resourceHandleVal")
    -
    -scomplexVal ::
    -            forall msg msg' . Data.ProtoLens.HasField "scomplexVal" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "scomplexVal" msg)
    -                (Data.ProtoLens.Field "scomplexVal" msg')
    -scomplexVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "scomplexVal")
    -
    -stringVal ::
    -          forall msg msg' . Data.ProtoLens.HasField "stringVal" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "stringVal" msg)
    -              (Data.ProtoLens.Field "stringVal" msg')
    -stringVal
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "stringVal")
    -
    -tensorContent ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "tensorContent" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "tensorContent" msg)
    -                  (Data.ProtoLens.Field "tensorContent" msg')
    -tensorContent
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "tensorContent")
    -
    -tensorShape ::
    -            forall msg msg' . Data.ProtoLens.HasField "tensorShape" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "tensorShape" msg)
    -                (Data.ProtoLens.Field "tensorShape" msg')
    -tensorShape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "tensorShape")
    -
    -versionNumber ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "versionNumber" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "versionNumber" msg)
    -                  (Data.ProtoLens.Field "versionNumber" msg')
    -versionNumber
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "versionNumber")
    -
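A minimal sketch of how this generated Tensor module was typically consumed, assuming the lens-family operators and data-default-class that the module itself pulls in through the reexports above; scalarFloat is a name invented here:

import Data.Default.Class (def)
import Data.Function ((&))
import Lens.Family2 ((.~), (^.))
import Proto.Tensorflow.Core.Framework.Tensor (TensorProto, dtype, floatVal)
import Proto.Tensorflow.Core.Framework.Types (DataType(DT_FLOAT))

-- Start from the all-defaults TensorProto, set two fields, read one back.
scalarFloat :: TensorProto
scalarFloat = def & dtype .~ DT_FLOAT
                  & floatVal .~ [3.14]

main :: IO ()
main = print (scalarFloat ^. floatVal)  -- [3.14]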
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorDescription.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorDescription.html
deleted file mode 100644
index 9dc62a2..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorDescription.html
+++ /dev/null
@@ -1,187 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/TensorDescription.hs
-{- This file was auto-generated from tensorflow/core/framework/tensor_description.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.TensorDescription where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified
    -       Proto.Tensorflow.Core.Framework.AllocationDescription
    -import qualified Proto.Tensorflow.Core.Framework.TensorShape
    -import qualified Proto.Tensorflow.Core.Framework.Types
    -
    -data TensorDescription = TensorDescription{_TensorDescription'dtype
    -                                           :: Proto.Tensorflow.Core.Framework.Types.DataType,
    -                                           _TensorDescription'shape ::
    -                                           Prelude.Maybe
    -                                             Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto,
    -                                           _TensorDescription'allocationDescription ::
    -                                           Prelude.Maybe
    -                                             Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription}
    -                       deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "dtype" TensorDescription =
    -     Proto.Tensorflow.Core.Framework.Types.DataType
    -
    -instance Data.ProtoLens.HasField "dtype" TensorDescription
    -         TensorDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorDescription'dtype
    -              (\ x__ y__ -> x__{_TensorDescription'dtype = y__})
    -
    -type instance Data.ProtoLens.Field "shape" TensorDescription =
    -     Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "shape" TensorDescription
    -         TensorDescription where
    -        field _
    -          = (Prelude..) maybe'shape
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'shape" TensorDescription
    -     =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto
    -
    -instance Data.ProtoLens.HasField "maybe'shape" TensorDescription
    -         TensorDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorDescription'shape
    -              (\ x__ y__ -> x__{_TensorDescription'shape = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "allocationDescription" TensorDescription =
    -     Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription
    -
    -instance Data.ProtoLens.HasField "allocationDescription"
    -         TensorDescription TensorDescription where
    -        field _
    -          = (Prelude..) maybe'allocationDescription
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'allocationDescription"
    -       TensorDescription
    -     =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription
    -
    -instance Data.ProtoLens.HasField "maybe'allocationDescription"
    -         TensorDescription TensorDescription where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _TensorDescription'allocationDescription
    -              (\ x__ y__ -> x__{_TensorDescription'allocationDescription = y__})
    -
    -instance Data.Default.Class.Default TensorDescription where
    -        def
    -          = TensorDescription{_TensorDescription'dtype =
    -                                Data.Default.Class.def,
    -                              _TensorDescription'shape = Prelude.Nothing,
    -                              _TensorDescription'allocationDescription = Prelude.Nothing}
    -
    -instance Data.ProtoLens.Message TensorDescription where
    -        descriptor
    -          = let dtype__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "dtype"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Types.DataType)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional dtype)
    -                shape__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "shape"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.TensorShape.TensorShapeProto)
    -                      (Data.ProtoLens.OptionalField maybe'shape)
    -                allocationDescription__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allocation_description"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.AllocationDescription.AllocationDescription)
    -                      (Data.ProtoLens.OptionalField maybe'allocationDescription)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, dtype__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, shape__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, allocationDescription__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("dtype", dtype__field_descriptor),
    -                    ("shape", shape__field_descriptor),
    -                    ("allocation_description",
    -                     allocationDescription__field_descriptor)])
    -
    -allocationDescription ::
    -                      forall msg msg' .
    -                        Data.ProtoLens.HasField "allocationDescription" msg msg' =>
    -                        Lens.Family2.Lens msg msg'
    -                          (Data.ProtoLens.Field "allocationDescription" msg)
    -                          (Data.ProtoLens.Field "allocationDescription" msg')
    -allocationDescription
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allocationDescription")
    -
    -dtype ::
    -      forall msg msg' . Data.ProtoLens.HasField "dtype" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dtype" msg)
    -          (Data.ProtoLens.Field "dtype" msg')
    -dtype
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dtype")
    -
    -maybe'allocationDescription ::
    -                            forall msg msg' .
    -                              Data.ProtoLens.HasField "maybe'allocationDescription" msg msg' =>
    -                              Lens.Family2.Lens msg msg'
    -                                (Data.ProtoLens.Field "maybe'allocationDescription" msg)
    -                                (Data.ProtoLens.Field "maybe'allocationDescription" msg')
    -maybe'allocationDescription
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'allocationDescription")
    -
    -maybe'shape ::
    -            forall msg msg' . Data.ProtoLens.HasField "maybe'shape" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "maybe'shape" msg)
    -                (Data.ProtoLens.Field "maybe'shape" msg')
    -maybe'shape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "maybe'shape")
    -
    -shape ::
    -      forall msg msg' . Data.ProtoLens.HasField "shape" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "shape" msg)
    -          (Data.ProtoLens.Field "shape" msg')
    -shape
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "shape")
    -
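The shape field illustrates the two lenses proto-lens generates for an optional submessage: shape substitutes the default value when the field is unset, while maybe'shape exposes the underlying Maybe. A sketch under the same assumptions as above:

import Data.Default.Class (def)
import Data.Function ((&))
import Data.Maybe (isJust)
import Lens.Family2 ((.~), (^.))
import Proto.Tensorflow.Core.Framework.TensorDescription
       (TensorDescription, shape, maybe'shape)
import Proto.Tensorflow.Core.Framework.TensorShape (TensorShapeProto)

withShape :: TensorDescription
withShape = def & shape .~ (def :: TensorShapeProto)

-- maybe'shape distinguishes "unset" from "set to the default value".
main :: IO ()
main = do
  print (isJust ((def :: TensorDescription) ^. maybe'shape))  -- False
  print (isJust (withShape ^. maybe'shape))                   -- True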
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorShape.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorShape.html
deleted file mode 100644
index b090454..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-TensorShape.html
+++ /dev/null
@@ -1,166 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/TensorShape.hs
-{- This file was auto-generated from tensorflow/core/framework/tensor_shape.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.TensorShape where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -
    -data TensorShapeProto = TensorShapeProto{_TensorShapeProto'dim ::
    -                                         [TensorShapeProto'Dim],
    -                                         _TensorShapeProto'unknownRank :: Prelude.Bool}
    -                      deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "dim" TensorShapeProto =
    -     [TensorShapeProto'Dim]
    -
    -instance Data.ProtoLens.HasField "dim" TensorShapeProto
    -         TensorShapeProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorShapeProto'dim
    -              (\ x__ y__ -> x__{_TensorShapeProto'dim = y__})
    -
    -type instance Data.ProtoLens.Field "unknownRank" TensorShapeProto =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "unknownRank" TensorShapeProto
    -         TensorShapeProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorShapeProto'unknownRank
    -              (\ x__ y__ -> x__{_TensorShapeProto'unknownRank = y__})
    -
    -instance Data.Default.Class.Default TensorShapeProto where
    -        def
    -          = TensorShapeProto{_TensorShapeProto'dim = [],
    -                             _TensorShapeProto'unknownRank = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message TensorShapeProto where
    -        descriptor
    -          = let dim__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "dim"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor TensorShapeProto'Dim)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked dim)
    -                unknownRank__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "unknown_rank"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional unknownRank)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 2, dim__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, unknownRank__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("dim", dim__field_descriptor),
    -                    ("unknown_rank", unknownRank__field_descriptor)])
    -
    -data TensorShapeProto'Dim = TensorShapeProto'Dim{_TensorShapeProto'Dim'size
    -                                                 :: Data.Int.Int64,
    -                                                 _TensorShapeProto'Dim'name :: Data.Text.Text}
    -                          deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "size" TensorShapeProto'Dim =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "size" TensorShapeProto'Dim
    -         TensorShapeProto'Dim where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorShapeProto'Dim'size
    -              (\ x__ y__ -> x__{_TensorShapeProto'Dim'size = y__})
    -
    -type instance Data.ProtoLens.Field "name" TensorShapeProto'Dim =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "name" TensorShapeProto'Dim
    -         TensorShapeProto'Dim where
    -        field _
    -          = Lens.Family2.Unchecked.lens _TensorShapeProto'Dim'name
    -              (\ x__ y__ -> x__{_TensorShapeProto'Dim'name = y__})
    -
    -instance Data.Default.Class.Default TensorShapeProto'Dim where
    -        def
    -          = TensorShapeProto'Dim{_TensorShapeProto'Dim'size =
    -                                   Data.ProtoLens.fieldDefault,
    -                                 _TensorShapeProto'Dim'name = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message TensorShapeProto'Dim where
    -        descriptor
    -          = let size__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "size"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional size)
    -                name__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional name)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, size__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, name__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("size", size__field_descriptor),
    -                    ("name", name__field_descriptor)])
    -
    -dim ::
    -    forall msg msg' . Data.ProtoLens.HasField "dim" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "dim" msg)
    -        (Data.ProtoLens.Field "dim" msg')
    -dim
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "dim")
    -
    -name ::
    -     forall msg msg' . Data.ProtoLens.HasField "name" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "name" msg)
    -         (Data.ProtoLens.Field "name" msg')
    -name
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "name")
    -
    -size ::
    -     forall msg msg' . Data.ProtoLens.HasField "size" msg msg' =>
    -       Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "size" msg)
    -         (Data.ProtoLens.Field "size" msg')
    -size
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "size")
    -
    -unknownRank ::
    -            forall msg msg' . Data.ProtoLens.HasField "unknownRank" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "unknownRank" msg)
    -                (Data.ProtoLens.Field "unknownRank" msg')
    -unknownRank
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "unknownRank")
    -
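A sketch of building a shape with the lenses above, under the same assumed operators; each list element is a TensorShapeProto'Dim built up from its default:

import Data.Default.Class (def)
import Data.Function ((&))
import Lens.Family2 ((.~), (^.))
import Proto.Tensorflow.Core.Framework.TensorShape (TensorShapeProto, dim, size)

-- A 2x3 matrix shape: two Dim submessages with sizes 2 and 3.
matrixShape :: TensorShapeProto
matrixShape = def & dim .~ [def & size .~ 2, def & size .~ 3]

rank :: Int
rank = length (matrixShape ^. dim)  -- 2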
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Types.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Types.html
deleted file mode 100644
index 086d4e7..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Types.html
+++ /dev/null
@@ -1,355 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Types.hs
-{- This file was auto-generated from tensorflow/core/framework/types.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.Types where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -
    -data DataType = DT_INVALID
    -              | DT_FLOAT
    -              | DT_DOUBLE
    -              | DT_INT32
    -              | DT_UINT8
    -              | DT_INT16
    -              | DT_INT8
    -              | DT_STRING
    -              | DT_COMPLEX64
    -              | DT_INT64
    -              | DT_BOOL
    -              | DT_QINT8
    -              | DT_QUINT8
    -              | DT_QINT32
    -              | DT_BFLOAT16
    -              | DT_QINT16
    -              | DT_QUINT16
    -              | DT_UINT16
    -              | DT_COMPLEX128
    -              | DT_HALF
    -              | DT_RESOURCE
    -              | DT_FLOAT_REF
    -              | DT_DOUBLE_REF
    -              | DT_INT32_REF
    -              | DT_UINT8_REF
    -              | DT_INT16_REF
    -              | DT_INT8_REF
    -              | DT_STRING_REF
    -              | DT_COMPLEX64_REF
    -              | DT_INT64_REF
    -              | DT_BOOL_REF
    -              | DT_QINT8_REF
    -              | DT_QUINT8_REF
    -              | DT_QINT32_REF
    -              | DT_BFLOAT16_REF
    -              | DT_QINT16_REF
    -              | DT_QUINT16_REF
    -              | DT_UINT16_REF
    -              | DT_COMPLEX128_REF
    -              | DT_HALF_REF
    -              | DT_RESOURCE_REF
    -              deriving (Prelude.Show, Prelude.Eq)
    -
    -instance Data.Default.Class.Default DataType where
    -        def = DT_INVALID
    -
    -instance Data.ProtoLens.FieldDefault DataType where
    -        fieldDefault = DT_INVALID
    -
    -instance Data.ProtoLens.MessageEnum DataType where
    -        maybeToEnum 0 = Prelude.Just DT_INVALID
    -        maybeToEnum 1 = Prelude.Just DT_FLOAT
    -        maybeToEnum 2 = Prelude.Just DT_DOUBLE
    -        maybeToEnum 3 = Prelude.Just DT_INT32
    -        maybeToEnum 4 = Prelude.Just DT_UINT8
    -        maybeToEnum 5 = Prelude.Just DT_INT16
    -        maybeToEnum 6 = Prelude.Just DT_INT8
    -        maybeToEnum 7 = Prelude.Just DT_STRING
    -        maybeToEnum 8 = Prelude.Just DT_COMPLEX64
    -        maybeToEnum 9 = Prelude.Just DT_INT64
    -        maybeToEnum 10 = Prelude.Just DT_BOOL
    -        maybeToEnum 11 = Prelude.Just DT_QINT8
    -        maybeToEnum 12 = Prelude.Just DT_QUINT8
    -        maybeToEnum 13 = Prelude.Just DT_QINT32
    -        maybeToEnum 14 = Prelude.Just DT_BFLOAT16
    -        maybeToEnum 15 = Prelude.Just DT_QINT16
    -        maybeToEnum 16 = Prelude.Just DT_QUINT16
    -        maybeToEnum 17 = Prelude.Just DT_UINT16
    -        maybeToEnum 18 = Prelude.Just DT_COMPLEX128
    -        maybeToEnum 19 = Prelude.Just DT_HALF
    -        maybeToEnum 20 = Prelude.Just DT_RESOURCE
    -        maybeToEnum 101 = Prelude.Just DT_FLOAT_REF
    -        maybeToEnum 102 = Prelude.Just DT_DOUBLE_REF
    -        maybeToEnum 103 = Prelude.Just DT_INT32_REF
    -        maybeToEnum 104 = Prelude.Just DT_UINT8_REF
    -        maybeToEnum 105 = Prelude.Just DT_INT16_REF
    -        maybeToEnum 106 = Prelude.Just DT_INT8_REF
    -        maybeToEnum 107 = Prelude.Just DT_STRING_REF
    -        maybeToEnum 108 = Prelude.Just DT_COMPLEX64_REF
    -        maybeToEnum 109 = Prelude.Just DT_INT64_REF
    -        maybeToEnum 110 = Prelude.Just DT_BOOL_REF
    -        maybeToEnum 111 = Prelude.Just DT_QINT8_REF
    -        maybeToEnum 112 = Prelude.Just DT_QUINT8_REF
    -        maybeToEnum 113 = Prelude.Just DT_QINT32_REF
    -        maybeToEnum 114 = Prelude.Just DT_BFLOAT16_REF
    -        maybeToEnum 115 = Prelude.Just DT_QINT16_REF
    -        maybeToEnum 116 = Prelude.Just DT_QUINT16_REF
    -        maybeToEnum 117 = Prelude.Just DT_UINT16_REF
    -        maybeToEnum 118 = Prelude.Just DT_COMPLEX128_REF
    -        maybeToEnum 119 = Prelude.Just DT_HALF_REF
    -        maybeToEnum 120 = Prelude.Just DT_RESOURCE_REF
    -        maybeToEnum _ = Prelude.Nothing
    -        showEnum DT_INVALID = "DT_INVALID"
    -        showEnum DT_FLOAT = "DT_FLOAT"
    -        showEnum DT_DOUBLE = "DT_DOUBLE"
    -        showEnum DT_INT32 = "DT_INT32"
    -        showEnum DT_UINT8 = "DT_UINT8"
    -        showEnum DT_INT16 = "DT_INT16"
    -        showEnum DT_INT8 = "DT_INT8"
    -        showEnum DT_STRING = "DT_STRING"
    -        showEnum DT_COMPLEX64 = "DT_COMPLEX64"
    -        showEnum DT_INT64 = "DT_INT64"
    -        showEnum DT_BOOL = "DT_BOOL"
    -        showEnum DT_QINT8 = "DT_QINT8"
    -        showEnum DT_QUINT8 = "DT_QUINT8"
    -        showEnum DT_QINT32 = "DT_QINT32"
    -        showEnum DT_BFLOAT16 = "DT_BFLOAT16"
    -        showEnum DT_QINT16 = "DT_QINT16"
    -        showEnum DT_QUINT16 = "DT_QUINT16"
    -        showEnum DT_UINT16 = "DT_UINT16"
    -        showEnum DT_COMPLEX128 = "DT_COMPLEX128"
    -        showEnum DT_HALF = "DT_HALF"
    -        showEnum DT_RESOURCE = "DT_RESOURCE"
    -        showEnum DT_FLOAT_REF = "DT_FLOAT_REF"
    -        showEnum DT_DOUBLE_REF = "DT_DOUBLE_REF"
    -        showEnum DT_INT32_REF = "DT_INT32_REF"
    -        showEnum DT_UINT8_REF = "DT_UINT8_REF"
    -        showEnum DT_INT16_REF = "DT_INT16_REF"
    -        showEnum DT_INT8_REF = "DT_INT8_REF"
    -        showEnum DT_STRING_REF = "DT_STRING_REF"
    -        showEnum DT_COMPLEX64_REF = "DT_COMPLEX64_REF"
    -        showEnum DT_INT64_REF = "DT_INT64_REF"
    -        showEnum DT_BOOL_REF = "DT_BOOL_REF"
    -        showEnum DT_QINT8_REF = "DT_QINT8_REF"
    -        showEnum DT_QUINT8_REF = "DT_QUINT8_REF"
    -        showEnum DT_QINT32_REF = "DT_QINT32_REF"
    -        showEnum DT_BFLOAT16_REF = "DT_BFLOAT16_REF"
    -        showEnum DT_QINT16_REF = "DT_QINT16_REF"
    -        showEnum DT_QUINT16_REF = "DT_QUINT16_REF"
    -        showEnum DT_UINT16_REF = "DT_UINT16_REF"
    -        showEnum DT_COMPLEX128_REF = "DT_COMPLEX128_REF"
    -        showEnum DT_HALF_REF = "DT_HALF_REF"
    -        showEnum DT_RESOURCE_REF = "DT_RESOURCE_REF"
    -        readEnum "DT_INVALID" = Prelude.Just DT_INVALID
    -        readEnum "DT_FLOAT" = Prelude.Just DT_FLOAT
    -        readEnum "DT_DOUBLE" = Prelude.Just DT_DOUBLE
    -        readEnum "DT_INT32" = Prelude.Just DT_INT32
    -        readEnum "DT_UINT8" = Prelude.Just DT_UINT8
    -        readEnum "DT_INT16" = Prelude.Just DT_INT16
    -        readEnum "DT_INT8" = Prelude.Just DT_INT8
    -        readEnum "DT_STRING" = Prelude.Just DT_STRING
    -        readEnum "DT_COMPLEX64" = Prelude.Just DT_COMPLEX64
    -        readEnum "DT_INT64" = Prelude.Just DT_INT64
    -        readEnum "DT_BOOL" = Prelude.Just DT_BOOL
    -        readEnum "DT_QINT8" = Prelude.Just DT_QINT8
    -        readEnum "DT_QUINT8" = Prelude.Just DT_QUINT8
    -        readEnum "DT_QINT32" = Prelude.Just DT_QINT32
    -        readEnum "DT_BFLOAT16" = Prelude.Just DT_BFLOAT16
    -        readEnum "DT_QINT16" = Prelude.Just DT_QINT16
    -        readEnum "DT_QUINT16" = Prelude.Just DT_QUINT16
    -        readEnum "DT_UINT16" = Prelude.Just DT_UINT16
    -        readEnum "DT_COMPLEX128" = Prelude.Just DT_COMPLEX128
    -        readEnum "DT_HALF" = Prelude.Just DT_HALF
    -        readEnum "DT_RESOURCE" = Prelude.Just DT_RESOURCE
    -        readEnum "DT_FLOAT_REF" = Prelude.Just DT_FLOAT_REF
    -        readEnum "DT_DOUBLE_REF" = Prelude.Just DT_DOUBLE_REF
    -        readEnum "DT_INT32_REF" = Prelude.Just DT_INT32_REF
    -        readEnum "DT_UINT8_REF" = Prelude.Just DT_UINT8_REF
    -        readEnum "DT_INT16_REF" = Prelude.Just DT_INT16_REF
    -        readEnum "DT_INT8_REF" = Prelude.Just DT_INT8_REF
    -        readEnum "DT_STRING_REF" = Prelude.Just DT_STRING_REF
    -        readEnum "DT_COMPLEX64_REF" = Prelude.Just DT_COMPLEX64_REF
    -        readEnum "DT_INT64_REF" = Prelude.Just DT_INT64_REF
    -        readEnum "DT_BOOL_REF" = Prelude.Just DT_BOOL_REF
    -        readEnum "DT_QINT8_REF" = Prelude.Just DT_QINT8_REF
    -        readEnum "DT_QUINT8_REF" = Prelude.Just DT_QUINT8_REF
    -        readEnum "DT_QINT32_REF" = Prelude.Just DT_QINT32_REF
    -        readEnum "DT_BFLOAT16_REF" = Prelude.Just DT_BFLOAT16_REF
    -        readEnum "DT_QINT16_REF" = Prelude.Just DT_QINT16_REF
    -        readEnum "DT_QUINT16_REF" = Prelude.Just DT_QUINT16_REF
    -        readEnum "DT_UINT16_REF" = Prelude.Just DT_UINT16_REF
    -        readEnum "DT_COMPLEX128_REF" = Prelude.Just DT_COMPLEX128_REF
    -        readEnum "DT_HALF_REF" = Prelude.Just DT_HALF_REF
    -        readEnum "DT_RESOURCE_REF" = Prelude.Just DT_RESOURCE_REF
    -        readEnum _ = Prelude.Nothing
    -
    -instance Prelude.Enum DataType where
    -        toEnum k__
    -          = Prelude.maybe
    -              (Prelude.error
    -                 ((Prelude.++) "toEnum: unknown value for enum DataType: "
    -                    (Prelude.show k__)))
    -              Prelude.id
    -              (Data.ProtoLens.maybeToEnum k__)
    -        fromEnum DT_INVALID = 0
    -        fromEnum DT_FLOAT = 1
    -        fromEnum DT_DOUBLE = 2
    -        fromEnum DT_INT32 = 3
    -        fromEnum DT_UINT8 = 4
    -        fromEnum DT_INT16 = 5
    -        fromEnum DT_INT8 = 6
    -        fromEnum DT_STRING = 7
    -        fromEnum DT_COMPLEX64 = 8
    -        fromEnum DT_INT64 = 9
    -        fromEnum DT_BOOL = 10
    -        fromEnum DT_QINT8 = 11
    -        fromEnum DT_QUINT8 = 12
    -        fromEnum DT_QINT32 = 13
    -        fromEnum DT_BFLOAT16 = 14
    -        fromEnum DT_QINT16 = 15
    -        fromEnum DT_QUINT16 = 16
    -        fromEnum DT_UINT16 = 17
    -        fromEnum DT_COMPLEX128 = 18
    -        fromEnum DT_HALF = 19
    -        fromEnum DT_RESOURCE = 20
    -        fromEnum DT_FLOAT_REF = 101
    -        fromEnum DT_DOUBLE_REF = 102
    -        fromEnum DT_INT32_REF = 103
    -        fromEnum DT_UINT8_REF = 104
    -        fromEnum DT_INT16_REF = 105
    -        fromEnum DT_INT8_REF = 106
    -        fromEnum DT_STRING_REF = 107
    -        fromEnum DT_COMPLEX64_REF = 108
    -        fromEnum DT_INT64_REF = 109
    -        fromEnum DT_BOOL_REF = 110
    -        fromEnum DT_QINT8_REF = 111
    -        fromEnum DT_QUINT8_REF = 112
    -        fromEnum DT_QINT32_REF = 113
    -        fromEnum DT_BFLOAT16_REF = 114
    -        fromEnum DT_QINT16_REF = 115
    -        fromEnum DT_QUINT16_REF = 116
    -        fromEnum DT_UINT16_REF = 117
    -        fromEnum DT_COMPLEX128_REF = 118
    -        fromEnum DT_HALF_REF = 119
    -        fromEnum DT_RESOURCE_REF = 120
    -        succ DT_RESOURCE_REF
    -          = Prelude.error
    -              "Ident \"DataType\".Ident \"succ\": bad argument Ident \"DT_RESOURCE_REF\". This value would be out of bounds."
    -        succ DT_INVALID = DT_FLOAT
    -        succ DT_FLOAT = DT_DOUBLE
    -        succ DT_DOUBLE = DT_INT32
    -        succ DT_INT32 = DT_UINT8
    -        succ DT_UINT8 = DT_INT16
    -        succ DT_INT16 = DT_INT8
    -        succ DT_INT8 = DT_STRING
    -        succ DT_STRING = DT_COMPLEX64
    -        succ DT_COMPLEX64 = DT_INT64
    -        succ DT_INT64 = DT_BOOL
    -        succ DT_BOOL = DT_QINT8
    -        succ DT_QINT8 = DT_QUINT8
    -        succ DT_QUINT8 = DT_QINT32
    -        succ DT_QINT32 = DT_BFLOAT16
    -        succ DT_BFLOAT16 = DT_QINT16
    -        succ DT_QINT16 = DT_QUINT16
    -        succ DT_QUINT16 = DT_UINT16
    -        succ DT_UINT16 = DT_COMPLEX128
    -        succ DT_COMPLEX128 = DT_HALF
    -        succ DT_HALF = DT_RESOURCE
    -        succ DT_RESOURCE = DT_FLOAT_REF
    -        succ DT_FLOAT_REF = DT_DOUBLE_REF
    -        succ DT_DOUBLE_REF = DT_INT32_REF
    -        succ DT_INT32_REF = DT_UINT8_REF
    -        succ DT_UINT8_REF = DT_INT16_REF
    -        succ DT_INT16_REF = DT_INT8_REF
    -        succ DT_INT8_REF = DT_STRING_REF
    -        succ DT_STRING_REF = DT_COMPLEX64_REF
    -        succ DT_COMPLEX64_REF = DT_INT64_REF
    -        succ DT_INT64_REF = DT_BOOL_REF
    -        succ DT_BOOL_REF = DT_QINT8_REF
    -        succ DT_QINT8_REF = DT_QUINT8_REF
    -        succ DT_QUINT8_REF = DT_QINT32_REF
    -        succ DT_QINT32_REF = DT_BFLOAT16_REF
    -        succ DT_BFLOAT16_REF = DT_QINT16_REF
    -        succ DT_QINT16_REF = DT_QUINT16_REF
    -        succ DT_QUINT16_REF = DT_UINT16_REF
    -        succ DT_UINT16_REF = DT_COMPLEX128_REF
    -        succ DT_COMPLEX128_REF = DT_HALF_REF
    -        succ DT_HALF_REF = DT_RESOURCE_REF
    -        pred DT_INVALID
    -          = Prelude.error
    -              "Ident \"DataType\".Ident \"pred\": bad argument Ident \"DT_INVALID\". This value would be out of bounds."
    -        pred DT_FLOAT = DT_INVALID
    -        pred DT_DOUBLE = DT_FLOAT
    -        pred DT_INT32 = DT_DOUBLE
    -        pred DT_UINT8 = DT_INT32
    -        pred DT_INT16 = DT_UINT8
    -        pred DT_INT8 = DT_INT16
    -        pred DT_STRING = DT_INT8
    -        pred DT_COMPLEX64 = DT_STRING
    -        pred DT_INT64 = DT_COMPLEX64
    -        pred DT_BOOL = DT_INT64
    -        pred DT_QINT8 = DT_BOOL
    -        pred DT_QUINT8 = DT_QINT8
    -        pred DT_QINT32 = DT_QUINT8
    -        pred DT_BFLOAT16 = DT_QINT32
    -        pred DT_QINT16 = DT_BFLOAT16
    -        pred DT_QUINT16 = DT_QINT16
    -        pred DT_UINT16 = DT_QUINT16
    -        pred DT_COMPLEX128 = DT_UINT16
    -        pred DT_HALF = DT_COMPLEX128
    -        pred DT_RESOURCE = DT_HALF
    -        pred DT_FLOAT_REF = DT_RESOURCE
    -        pred DT_DOUBLE_REF = DT_FLOAT_REF
    -        pred DT_INT32_REF = DT_DOUBLE_REF
    -        pred DT_UINT8_REF = DT_INT32_REF
    -        pred DT_INT16_REF = DT_UINT8_REF
    -        pred DT_INT8_REF = DT_INT16_REF
    -        pred DT_STRING_REF = DT_INT8_REF
    -        pred DT_COMPLEX64_REF = DT_STRING_REF
    -        pred DT_INT64_REF = DT_COMPLEX64_REF
    -        pred DT_BOOL_REF = DT_INT64_REF
    -        pred DT_QINT8_REF = DT_BOOL_REF
    -        pred DT_QUINT8_REF = DT_QINT8_REF
    -        pred DT_QINT32_REF = DT_QUINT8_REF
    -        pred DT_BFLOAT16_REF = DT_QINT32_REF
    -        pred DT_QINT16_REF = DT_BFLOAT16_REF
    -        pred DT_QUINT16_REF = DT_QINT16_REF
    -        pred DT_UINT16_REF = DT_QUINT16_REF
    -        pred DT_COMPLEX128_REF = DT_UINT16_REF
    -        pred DT_HALF_REF = DT_COMPLEX128_REF
    -        pred DT_RESOURCE_REF = DT_HALF_REF
    -        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
    -        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
    -        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
    -        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
    -
    -instance Prelude.Bounded DataType where
    -        minBound = DT_INVALID
    -        maxBound = DT_RESOURCE_REF
    -
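The MessageEnum and Enum instances above make DataType round-trippable through its wire tags, with the _REF variants sitting at tag + 100. A sketch, using only maybeToEnum, which the generated code itself calls qualified through Data.ProtoLens:

import qualified Data.ProtoLens
import Proto.Tensorflow.Core.Framework.Types (DataType(..))

main :: IO ()
main = do
  print (Data.ProtoLens.maybeToEnum 1 :: Maybe DataType)   -- Just DT_FLOAT
  print (Data.ProtoLens.maybeToEnum 21 :: Maybe DataType)  -- Nothing (unknown tag)
  print (fromEnum DT_FLOAT_REF)                            -- 101 (DT_FLOAT's tag + 100)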
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Versions.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Versions.html
deleted file mode 100644
index d37bb15..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Framework-Versions.html
+++ /dev/null
@@ -1,128 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Framework/Versions.hs
-{- This file was auto-generated from tensorflow/core/framework/versions.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Framework.Versions where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -
    -data VersionDef = VersionDef{_VersionDef'producer ::
    -                             Data.Int.Int32,
    -                             _VersionDef'minConsumer :: Data.Int.Int32,
    -                             _VersionDef'badConsumers :: [Data.Int.Int32]}
    -                deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "producer" VersionDef =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "producer" VersionDef VersionDef
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _VersionDef'producer
    -              (\ x__ y__ -> x__{_VersionDef'producer = y__})
    -
    -type instance Data.ProtoLens.Field "minConsumer" VersionDef =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "minConsumer" VersionDef
    -         VersionDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _VersionDef'minConsumer
    -              (\ x__ y__ -> x__{_VersionDef'minConsumer = y__})
    -
    -type instance Data.ProtoLens.Field "badConsumers" VersionDef =
    -     [Data.Int.Int32]
    -
    -instance Data.ProtoLens.HasField "badConsumers" VersionDef
    -         VersionDef where
    -        field _
    -          = Lens.Family2.Unchecked.lens _VersionDef'badConsumers
    -              (\ x__ y__ -> x__{_VersionDef'badConsumers = y__})
    -
    -instance Data.Default.Class.Default VersionDef where
    -        def
    -          = VersionDef{_VersionDef'producer = Data.ProtoLens.fieldDefault,
    -                       _VersionDef'minConsumer = Data.ProtoLens.fieldDefault,
    -                       _VersionDef'badConsumers = []}
    -
    -instance Data.ProtoLens.Message VersionDef where
    -        descriptor
    -          = let producer__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "producer"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional producer)
    -                minConsumer__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "min_consumer"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional minConsumer)
    -                badConsumers__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "bad_consumers"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked badConsumers)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, producer__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, minConsumer__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, badConsumers__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("producer", producer__field_descriptor),
    -                    ("min_consumer", minConsumer__field_descriptor),
    -                    ("bad_consumers", badConsumers__field_descriptor)])
    -
    -badConsumers ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "badConsumers" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "badConsumers" msg)
    -                 (Data.ProtoLens.Field "badConsumers" msg')
    -badConsumers
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "badConsumers")
    -
    -minConsumer ::
    -            forall msg msg' . Data.ProtoLens.HasField "minConsumer" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "minConsumer" msg)
    -                (Data.ProtoLens.Field "minConsumer" msg')
    -minConsumer
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "minConsumer")
    -
    -producer ::
    -         forall msg msg' . Data.ProtoLens.HasField "producer" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "producer" msg)
    -             (Data.ProtoLens.Field "producer" msg')
    -producer
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "producer")
    -
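[Editor's note] Every field in the module deleted above follows the same recipe: a Data.ProtoLens.Field type instance fixes the field's type, a HasField instance supplies the lens, and the polymorphic top-level names (producer, minConsumer, badConsumers) tie them together. A minimal usage sketch against that API, using lens-family operators plus (&) from Data.Function; the binding names are illustrative:

    import Data.Default.Class (def)
    import Data.Function ((&))
    import Data.Int (Int32)
    import Lens.Family2 ((.~), (^.))
    import Proto.Tensorflow.Core.Framework.Versions

    -- Build a VersionDef from the Default instance, then set fields:
    versions :: VersionDef
    versions = def
        & producer .~ 21
        & minConsumer .~ 8
        & badConsumers .~ [9, 13]   -- repeated int32 is a plain list

    -- Reading back through the same lens:
    currentProducer :: Int32
    currentProducer = versions ^. producer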
-
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Protobuf-Config.html b/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Protobuf-Config.html
deleted file mode 100644
index 3399ec9..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/Proto-Tensorflow-Core-Protobuf-Config.html
+++ /dev/null
@@ -1,1671 +0,0 @@
-.stack-work/dist/x86_64-osx/Cabal-1.22.5.0/build/autogen/Proto/Tensorflow/Core/Protobuf/Config.hs
-{- This file was auto-generated from tensorflow/core/protobuf/config.proto by the proto-lens-protoc program. -}
    -{-# LANGUAGE ScopedTypeVariables, DataKinds, TypeFamilies,
    -  MultiParamTypeClasses, FlexibleContexts, FlexibleInstances,
    -  PatternSynonyms #-}
    -{-# OPTIONS_GHC -fno-warn-unused-imports#-}
    -module Proto.Tensorflow.Core.Protobuf.Config where
    -import qualified Prelude
    -import qualified Data.Int
    -import qualified Data.Word
    -import qualified Data.ProtoLens.Reexport.Data.ProtoLens
    -       as Data.ProtoLens
    -import qualified
    -       Data.ProtoLens.Reexport.Data.ProtoLens.Message.Enum
    -       as Data.ProtoLens.Message.Enum
    -import qualified Data.ProtoLens.Reexport.Lens.Family2
    -       as Lens.Family2
    -import qualified Data.ProtoLens.Reexport.Lens.Family2.Unchecked
    -       as Lens.Family2.Unchecked
    -import qualified Data.ProtoLens.Reexport.Data.Default.Class
    -       as Data.Default.Class
    -import qualified Data.ProtoLens.Reexport.Data.Text as Data.Text
    -import qualified Data.ProtoLens.Reexport.Data.Map as Data.Map
    -import qualified Data.ProtoLens.Reexport.Data.ByteString
    -       as Data.ByteString
    -import qualified Proto.Tensorflow.Core.Framework.CostGraph
    -import qualified Proto.Tensorflow.Core.Framework.Graph
    -import qualified Proto.Tensorflow.Core.Framework.StepStats
    -
    -data ConfigProto = ConfigProto{_ConfigProto'deviceCount ::
    -                               Data.Map.Map Data.Text.Text Data.Int.Int32,
    -                               _ConfigProto'intraOpParallelismThreads :: Data.Int.Int32,
    -                               _ConfigProto'interOpParallelismThreads :: Data.Int.Int32,
    -                               _ConfigProto'usePerSessionThreads :: Prelude.Bool,
    -                               _ConfigProto'sessionInterOpThreadPool :: [ThreadPoolOptionProto],
    -                               _ConfigProto'placementPeriod :: Data.Int.Int32,
    -                               _ConfigProto'deviceFilters :: [Data.Text.Text],
    -                               _ConfigProto'gpuOptions :: Prelude.Maybe GPUOptions,
    -                               _ConfigProto'allowSoftPlacement :: Prelude.Bool,
    -                               _ConfigProto'logDevicePlacement :: Prelude.Bool,
    -                               _ConfigProto'graphOptions :: Prelude.Maybe GraphOptions,
    -                               _ConfigProto'operationTimeoutInMs :: Data.Int.Int64}
    -                 deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "deviceCount" ConfigProto =
    -     Data.Map.Map Data.Text.Text Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "deviceCount" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'deviceCount
    -              (\ x__ y__ -> x__{_ConfigProto'deviceCount = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "intraOpParallelismThreads" ConfigProto =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "intraOpParallelismThreads"
    -         ConfigProto ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _ConfigProto'intraOpParallelismThreads
    -              (\ x__ y__ -> x__{_ConfigProto'intraOpParallelismThreads = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "interOpParallelismThreads" ConfigProto =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "interOpParallelismThreads"
    -         ConfigProto ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _ConfigProto'interOpParallelismThreads
    -              (\ x__ y__ -> x__{_ConfigProto'interOpParallelismThreads = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "usePerSessionThreads" ConfigProto =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "usePerSessionThreads" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'usePerSessionThreads
    -              (\ x__ y__ -> x__{_ConfigProto'usePerSessionThreads = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "sessionInterOpThreadPool" ConfigProto =
    -     [ThreadPoolOptionProto]
    -
    -instance Data.ProtoLens.HasField "sessionInterOpThreadPool"
    -         ConfigProto ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'sessionInterOpThreadPool
    -              (\ x__ y__ -> x__{_ConfigProto'sessionInterOpThreadPool = y__})
    -
    -type instance Data.ProtoLens.Field "placementPeriod" ConfigProto =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "placementPeriod" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'placementPeriod
    -              (\ x__ y__ -> x__{_ConfigProto'placementPeriod = y__})
    -
    -type instance Data.ProtoLens.Field "deviceFilters" ConfigProto =
    -     [Data.Text.Text]
    -
    -instance Data.ProtoLens.HasField "deviceFilters" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'deviceFilters
    -              (\ x__ y__ -> x__{_ConfigProto'deviceFilters = y__})
    -
    -type instance Data.ProtoLens.Field "gpuOptions" ConfigProto =
    -     GPUOptions
    -
    -instance Data.ProtoLens.HasField "gpuOptions" ConfigProto
    -         ConfigProto where
    -        field _
    -          = (Prelude..) maybe'gpuOptions
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'gpuOptions" ConfigProto =
    -     Prelude.Maybe GPUOptions
    -
    -instance Data.ProtoLens.HasField "maybe'gpuOptions" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'gpuOptions
    -              (\ x__ y__ -> x__{_ConfigProto'gpuOptions = y__})
    -
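[Editor's note] Optional submessage fields like gpu_options generate two lenses with different semantics: gpuOptions composes maybe'gpuOptions with maybeLens def, so reading an absent field yields defaults and writing through it materialises the submessage, while maybe'gpuOptions exposes the raw Maybe for presence checks. A sketch of the distinction (helper names are illustrative):

    import Data.Default.Class (def)
    import Data.Maybe (isJust)
    import Lens.Family2 ((^.))
    import Proto.Tensorflow.Core.Protobuf.Config

    -- Reading through the defaulting lens never fails; an absent
    -- field comes back as Data.Default.Class.def:
    defaultedGpu :: GPUOptions
    defaultedGpu = (def :: ConfigProto) ^. gpuOptions

    -- The maybe' variant distinguishes "absent" from "present with
    -- default contents":
    hasGpuOptions :: ConfigProto -> Bool
    hasGpuOptions c = isJust (c ^. maybe'gpuOptions)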
    -type instance Data.ProtoLens.Field "allowSoftPlacement" ConfigProto
    -     = Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "allowSoftPlacement" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'allowSoftPlacement
    -              (\ x__ y__ -> x__{_ConfigProto'allowSoftPlacement = y__})
    -
    -type instance Data.ProtoLens.Field "logDevicePlacement" ConfigProto
    -     = Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "logDevicePlacement" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'logDevicePlacement
    -              (\ x__ y__ -> x__{_ConfigProto'logDevicePlacement = y__})
    -
    -type instance Data.ProtoLens.Field "graphOptions" ConfigProto =
    -     GraphOptions
    -
    -instance Data.ProtoLens.HasField "graphOptions" ConfigProto
    -         ConfigProto where
    -        field _
    -          = (Prelude..) maybe'graphOptions
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'graphOptions" ConfigProto
    -     = Prelude.Maybe GraphOptions
    -
    -instance Data.ProtoLens.HasField "maybe'graphOptions" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'graphOptions
    -              (\ x__ y__ -> x__{_ConfigProto'graphOptions = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "operationTimeoutInMs" ConfigProto =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "operationTimeoutInMs" ConfigProto
    -         ConfigProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'operationTimeoutInMs
    -              (\ x__ y__ -> x__{_ConfigProto'operationTimeoutInMs = y__})
    -
    -instance Data.Default.Class.Default ConfigProto where
    -        def
    -          = ConfigProto{_ConfigProto'deviceCount = Data.Map.empty,
    -                        _ConfigProto'intraOpParallelismThreads =
    -                          Data.ProtoLens.fieldDefault,
    -                        _ConfigProto'interOpParallelismThreads =
    -                          Data.ProtoLens.fieldDefault,
    -                        _ConfigProto'usePerSessionThreads = Data.ProtoLens.fieldDefault,
    -                        _ConfigProto'sessionInterOpThreadPool = [],
    -                        _ConfigProto'placementPeriod = Data.ProtoLens.fieldDefault,
    -                        _ConfigProto'deviceFilters = [],
    -                        _ConfigProto'gpuOptions = Prelude.Nothing,
    -                        _ConfigProto'allowSoftPlacement = Data.ProtoLens.fieldDefault,
    -                        _ConfigProto'logDevicePlacement = Data.ProtoLens.fieldDefault,
    -                        _ConfigProto'graphOptions = Prelude.Nothing,
    -                        _ConfigProto'operationTimeoutInMs = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message ConfigProto where
    -        descriptor
    -          = let deviceCount__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "device_count"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor ConfigProto'DeviceCountEntry)
    -                      (Data.ProtoLens.MapField key value deviceCount)
    -                intraOpParallelismThreads__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "intra_op_parallelism_threads"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         intraOpParallelismThreads)
    -                interOpParallelismThreads__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "inter_op_parallelism_threads"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         interOpParallelismThreads)
    -                usePerSessionThreads__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "use_per_session_threads"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         usePerSessionThreads)
    -                sessionInterOpThreadPool__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "session_inter_op_thread_pool"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor ThreadPoolOptionProto)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    -                         sessionInterOpThreadPool)
    -                placementPeriod__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "placement_period"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional placementPeriod)
    -                deviceFilters__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "device_filters"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    -                         deviceFilters)
    -                gpuOptions__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "gpu_options"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor GPUOptions)
    -                      (Data.ProtoLens.OptionalField maybe'gpuOptions)
    -                allowSoftPlacement__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allow_soft_placement"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         allowSoftPlacement)
    -                logDevicePlacement__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "log_device_placement"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         logDevicePlacement)
    -                graphOptions__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "graph_options"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor GraphOptions)
    -                      (Data.ProtoLens.OptionalField maybe'graphOptions)
    -                operationTimeoutInMs__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "operation_timeout_in_ms"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         operationTimeoutInMs)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, deviceCount__field_descriptor),
    -                    (Data.ProtoLens.Tag 2,
    -                     intraOpParallelismThreads__field_descriptor),
    -                    (Data.ProtoLens.Tag 5,
    -                     interOpParallelismThreads__field_descriptor),
    -                    (Data.ProtoLens.Tag 9, usePerSessionThreads__field_descriptor),
    -                    (Data.ProtoLens.Tag 12,
    -                     sessionInterOpThreadPool__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, placementPeriod__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, deviceFilters__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, gpuOptions__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, allowSoftPlacement__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, logDevicePlacement__field_descriptor),
    -                    (Data.ProtoLens.Tag 10, graphOptions__field_descriptor),
    -                    (Data.ProtoLens.Tag 11, operationTimeoutInMs__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("device_count", deviceCount__field_descriptor),
    -                    ("intra_op_parallelism_threads",
    -                     intraOpParallelismThreads__field_descriptor),
    -                    ("inter_op_parallelism_threads",
    -                     interOpParallelismThreads__field_descriptor),
    -                    ("use_per_session_threads",
    -                     usePerSessionThreads__field_descriptor),
    -                    ("session_inter_op_thread_pool",
    -                     sessionInterOpThreadPool__field_descriptor),
    -                    ("placement_period", placementPeriod__field_descriptor),
    -                    ("device_filters", deviceFilters__field_descriptor),
    -                    ("gpu_options", gpuOptions__field_descriptor),
    -                    ("allow_soft_placement", allowSoftPlacement__field_descriptor),
    -                    ("log_device_placement", logDevicePlacement__field_descriptor),
    -                    ("graph_options", graphOptions__field_descriptor),
    -                    ("operation_timeout_in_ms",
    -                     operationTimeoutInMs__field_descriptor)])
    -
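[Editor's note] ConfigProto's device_count map deserves a note: the wire format carries repeated ConfigProto'DeviceCountEntry messages under tag 1, but the MapField descriptor above hides that behind an ordinary Data.Map on the deviceCount lens. A sketch, assuming OverloadedStrings for the Text keys; the binding name is illustrative:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Default.Class (def)
    import Data.Function ((&))
    import qualified Data.Map as Map
    import Lens.Family2 ((.~))
    import Proto.Tensorflow.Core.Protobuf.Config

    -- Limit the session to one CPU device and no GPUs:
    sessionConfig :: ConfigProto
    sessionConfig = def
        & deviceCount .~ Map.fromList [("CPU", 1), ("GPU", 0)]
        & allowSoftPlacement .~ True
        & operationTimeoutInMs .~ 60000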
    -data ConfigProto'DeviceCountEntry = ConfigProto'DeviceCountEntry{_ConfigProto'DeviceCountEntry'key
    -                                                                 :: Data.Text.Text,
    -                                                                 _ConfigProto'DeviceCountEntry'value
    -                                                                 :: Data.Int.Int32}
    -                                  deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "key" ConfigProto'DeviceCountEntry =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "key" ConfigProto'DeviceCountEntry
    -         ConfigProto'DeviceCountEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'DeviceCountEntry'key
    -              (\ x__ y__ -> x__{_ConfigProto'DeviceCountEntry'key = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "value" ConfigProto'DeviceCountEntry =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "value"
    -         ConfigProto'DeviceCountEntry ConfigProto'DeviceCountEntry where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ConfigProto'DeviceCountEntry'value
    -              (\ x__ y__ -> x__{_ConfigProto'DeviceCountEntry'value = y__})
    -
    -instance Data.Default.Class.Default ConfigProto'DeviceCountEntry
    -         where
    -        def
    -          = ConfigProto'DeviceCountEntry{_ConfigProto'DeviceCountEntry'key =
    -                                           Data.ProtoLens.fieldDefault,
    -                                         _ConfigProto'DeviceCountEntry'value =
    -                                           Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message ConfigProto'DeviceCountEntry where
    -        descriptor
    -          = let key__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "key"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional key)
    -                value__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "value"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional value)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, key__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, value__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("key", key__field_descriptor),
    -                    ("value", value__field_descriptor)])
    -
    -data DebugTensorWatch = DebugTensorWatch{_DebugTensorWatch'nodeName
    -                                         :: Data.Text.Text,
    -                                         _DebugTensorWatch'outputSlot :: Data.Int.Int32,
    -                                         _DebugTensorWatch'debugOps :: [Data.Text.Text],
    -                                         _DebugTensorWatch'debugUrls :: [Data.Text.Text]}
    -                      deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "nodeName" DebugTensorWatch =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "nodeName" DebugTensorWatch
    -         DebugTensorWatch where
    -        field _
    -          = Lens.Family2.Unchecked.lens _DebugTensorWatch'nodeName
    -              (\ x__ y__ -> x__{_DebugTensorWatch'nodeName = y__})
    -
    -type instance Data.ProtoLens.Field "outputSlot" DebugTensorWatch =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "outputSlot" DebugTensorWatch
    -         DebugTensorWatch where
    -        field _
    -          = Lens.Family2.Unchecked.lens _DebugTensorWatch'outputSlot
    -              (\ x__ y__ -> x__{_DebugTensorWatch'outputSlot = y__})
    -
    -type instance Data.ProtoLens.Field "debugOps" DebugTensorWatch =
    -     [Data.Text.Text]
    -
    -instance Data.ProtoLens.HasField "debugOps" DebugTensorWatch
    -         DebugTensorWatch where
    -        field _
    -          = Lens.Family2.Unchecked.lens _DebugTensorWatch'debugOps
    -              (\ x__ y__ -> x__{_DebugTensorWatch'debugOps = y__})
    -
    -type instance Data.ProtoLens.Field "debugUrls" DebugTensorWatch =
    -     [Data.Text.Text]
    -
    -instance Data.ProtoLens.HasField "debugUrls" DebugTensorWatch
    -         DebugTensorWatch where
    -        field _
    -          = Lens.Family2.Unchecked.lens _DebugTensorWatch'debugUrls
    -              (\ x__ y__ -> x__{_DebugTensorWatch'debugUrls = y__})
    -
    -instance Data.Default.Class.Default DebugTensorWatch where
    -        def
    -          = DebugTensorWatch{_DebugTensorWatch'nodeName =
    -                               Data.ProtoLens.fieldDefault,
    -                             _DebugTensorWatch'outputSlot = Data.ProtoLens.fieldDefault,
    -                             _DebugTensorWatch'debugOps = [], _DebugTensorWatch'debugUrls = []}
    -
    -instance Data.ProtoLens.Message DebugTensorWatch where
    -        descriptor
    -          = let nodeName__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "node_name"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional nodeName)
    -                outputSlot__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "output_slot"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional outputSlot)
    -                debugOps__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "debug_ops"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked debugOps)
    -                debugUrls__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "debug_urls"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked debugUrls)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, nodeName__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, outputSlot__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, debugOps__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, debugUrls__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("node_name", nodeName__field_descriptor),
    -                    ("output_slot", outputSlot__field_descriptor),
    -                    ("debug_ops", debugOps__field_descriptor),
    -                    ("debug_urls", debugUrls__field_descriptor)])
    -
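[Editor's note] DebugTensorWatch is plain data for the tfdbg protocol: node_name and output_slot pick a tensor, debug_ops names the debug kernels to insert, and debug_urls says where to ship the results. A sketch under the assumption that "DebugIdentity" and file:// sinks follow the usual tfdbg conventions; OverloadedStrings covers the Text fields:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Default.Class (def)
    import Data.Function ((&))
    import Lens.Family2 ((.~))
    import Proto.Tensorflow.Core.Protobuf.Config

    -- Watch output 0 of node "logits" with the identity debug op:
    watch :: DebugTensorWatch
    watch = def
        & nodeName .~ "logits"
        & outputSlot .~ 0
        & debugOps .~ ["DebugIdentity"]
        & debugUrls .~ ["file:///tmp/tfdbg"]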
    -data GPUOptions = GPUOptions{_GPUOptions'perProcessGpuMemoryFraction
    -                             :: Prelude.Double,
    -                             _GPUOptions'allocatorType :: Data.Text.Text,
    -                             _GPUOptions'deferredDeletionBytes :: Data.Int.Int64,
    -                             _GPUOptions'allowGrowth :: Prelude.Bool,
    -                             _GPUOptions'visibleDeviceList :: Data.Text.Text}
    -                deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "perProcessGpuMemoryFraction" GPUOptions =
    -     Prelude.Double
    -
    -instance Data.ProtoLens.HasField "perProcessGpuMemoryFraction"
    -         GPUOptions GPUOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _GPUOptions'perProcessGpuMemoryFraction
    -              (\ x__ y__ -> x__{_GPUOptions'perProcessGpuMemoryFraction = y__})
    -
    -type instance Data.ProtoLens.Field "allocatorType" GPUOptions =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "allocatorType" GPUOptions
    -         GPUOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GPUOptions'allocatorType
    -              (\ x__ y__ -> x__{_GPUOptions'allocatorType = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "deferredDeletionBytes" GPUOptions =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "deferredDeletionBytes" GPUOptions
    -         GPUOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GPUOptions'deferredDeletionBytes
    -              (\ x__ y__ -> x__{_GPUOptions'deferredDeletionBytes = y__})
    -
    -type instance Data.ProtoLens.Field "allowGrowth" GPUOptions =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "allowGrowth" GPUOptions
    -         GPUOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GPUOptions'allowGrowth
    -              (\ x__ y__ -> x__{_GPUOptions'allowGrowth = y__})
    -
    -type instance Data.ProtoLens.Field "visibleDeviceList" GPUOptions =
    -     Data.Text.Text
    -
    -instance Data.ProtoLens.HasField "visibleDeviceList" GPUOptions
    -         GPUOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GPUOptions'visibleDeviceList
    -              (\ x__ y__ -> x__{_GPUOptions'visibleDeviceList = y__})
    -
    -instance Data.Default.Class.Default GPUOptions where
    -        def
    -          = GPUOptions{_GPUOptions'perProcessGpuMemoryFraction =
    -                         Data.ProtoLens.fieldDefault,
    -                       _GPUOptions'allocatorType = Data.ProtoLens.fieldDefault,
    -                       _GPUOptions'deferredDeletionBytes = Data.ProtoLens.fieldDefault,
    -                       _GPUOptions'allowGrowth = Data.ProtoLens.fieldDefault,
    -                       _GPUOptions'visibleDeviceList = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message GPUOptions where
    -        descriptor
    -          = let perProcessGpuMemoryFraction__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "per_process_gpu_memory_fraction"
    -                      (Data.ProtoLens.DoubleField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Double)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         perProcessGpuMemoryFraction)
    -                allocatorType__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allocator_type"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allocatorType)
    -                deferredDeletionBytes__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "deferred_deletion_bytes"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         deferredDeletionBytes)
    -                allowGrowth__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "allow_growth"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional allowGrowth)
    -                visibleDeviceList__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "visible_device_list"
    -                      (Data.ProtoLens.StringField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Text.Text)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         visibleDeviceList)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1,
    -                     perProcessGpuMemoryFraction__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, allocatorType__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, deferredDeletionBytes__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, allowGrowth__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, visibleDeviceList__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("per_process_gpu_memory_fraction",
    -                     perProcessGpuMemoryFraction__field_descriptor),
    -                    ("allocator_type", allocatorType__field_descriptor),
    -                    ("deferred_deletion_bytes",
    -                     deferredDeletionBytes__field_descriptor),
    -                    ("allow_growth", allowGrowth__field_descriptor),
    -                    ("visible_device_list", visibleDeviceList__field_descriptor)])
    -
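[Editor's note] These are the knobs users most often touch: per_process_gpu_memory_fraction caps the allocator up front, allow_growth makes it claim memory on demand. A sketch of a conservative configuration, composing the gpuOptions lens from earlier with the field lenses here (binding name illustrative):

    import Data.Default.Class (def)
    import Data.Function ((&))
    import Lens.Family2 ((.~))
    import Proto.Tensorflow.Core.Protobuf.Config

    -- Take at most half the GPU up front, and grow only as needed;
    -- setting through gpuOptions materialises the submessage:
    conservativeGpu :: ConfigProto
    conservativeGpu = def
        & gpuOptions . perProcessGpuMemoryFraction .~ 0.5
        & gpuOptions . allowGrowth .~ True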
    -data GraphOptions = GraphOptions{_GraphOptions'enableRecvScheduling
    -                                 :: Prelude.Bool,
    -                                 _GraphOptions'optimizerOptions :: Prelude.Maybe OptimizerOptions,
    -                                 _GraphOptions'buildCostModel :: Data.Int.Int64,
    -                                 _GraphOptions'buildCostModelAfter :: Data.Int.Int64,
    -                                 _GraphOptions'inferShapes :: Prelude.Bool,
    -                                 _GraphOptions'placePrunedGraph :: Prelude.Bool,
    -                                 _GraphOptions'enableBfloat16Sendrecv :: Prelude.Bool,
    -                                 _GraphOptions'timelineStep :: Data.Int.Int32}
    -                  deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "enableRecvScheduling" GraphOptions =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "enableRecvScheduling"
    -         GraphOptions GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'enableRecvScheduling
    -              (\ x__ y__ -> x__{_GraphOptions'enableRecvScheduling = y__})
    -
    -type instance Data.ProtoLens.Field "optimizerOptions" GraphOptions
    -     = OptimizerOptions
    -
    -instance Data.ProtoLens.HasField "optimizerOptions" GraphOptions
    -         GraphOptions where
    -        field _
    -          = (Prelude..) maybe'optimizerOptions
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance
    -     Data.ProtoLens.Field "maybe'optimizerOptions" GraphOptions =
    -     Prelude.Maybe OptimizerOptions
    -
    -instance Data.ProtoLens.HasField "maybe'optimizerOptions"
    -         GraphOptions GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'optimizerOptions
    -              (\ x__ y__ -> x__{_GraphOptions'optimizerOptions = y__})
    -
    -type instance Data.ProtoLens.Field "buildCostModel" GraphOptions =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "buildCostModel" GraphOptions
    -         GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'buildCostModel
    -              (\ x__ y__ -> x__{_GraphOptions'buildCostModel = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "buildCostModelAfter" GraphOptions =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "buildCostModelAfter" GraphOptions
    -         GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'buildCostModelAfter
    -              (\ x__ y__ -> x__{_GraphOptions'buildCostModelAfter = y__})
    -
    -type instance Data.ProtoLens.Field "inferShapes" GraphOptions =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "inferShapes" GraphOptions
    -         GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'inferShapes
    -              (\ x__ y__ -> x__{_GraphOptions'inferShapes = y__})
    -
    -type instance Data.ProtoLens.Field "placePrunedGraph" GraphOptions
    -     = Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "placePrunedGraph" GraphOptions
    -         GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'placePrunedGraph
    -              (\ x__ y__ -> x__{_GraphOptions'placePrunedGraph = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "enableBfloat16Sendrecv" GraphOptions =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "enableBfloat16Sendrecv"
    -         GraphOptions GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'enableBfloat16Sendrecv
    -              (\ x__ y__ -> x__{_GraphOptions'enableBfloat16Sendrecv = y__})
    -
    -type instance Data.ProtoLens.Field "timelineStep" GraphOptions =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "timelineStep" GraphOptions
    -         GraphOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _GraphOptions'timelineStep
    -              (\ x__ y__ -> x__{_GraphOptions'timelineStep = y__})
    -
    -instance Data.Default.Class.Default GraphOptions where
    -        def
    -          = GraphOptions{_GraphOptions'enableRecvScheduling =
    -                           Data.ProtoLens.fieldDefault,
    -                         _GraphOptions'optimizerOptions = Prelude.Nothing,
    -                         _GraphOptions'buildCostModel = Data.ProtoLens.fieldDefault,
    -                         _GraphOptions'buildCostModelAfter = Data.ProtoLens.fieldDefault,
    -                         _GraphOptions'inferShapes = Data.ProtoLens.fieldDefault,
    -                         _GraphOptions'placePrunedGraph = Data.ProtoLens.fieldDefault,
    -                         _GraphOptions'enableBfloat16Sendrecv = Data.ProtoLens.fieldDefault,
    -                         _GraphOptions'timelineStep = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message GraphOptions where
    -        descriptor
    -          = let enableRecvScheduling__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "enable_recv_scheduling"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         enableRecvScheduling)
    -                optimizerOptions__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "optimizer_options"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor OptimizerOptions)
    -                      (Data.ProtoLens.OptionalField maybe'optimizerOptions)
    -                buildCostModel__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "build_cost_model"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional buildCostModel)
    -                buildCostModelAfter__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "build_cost_model_after"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         buildCostModelAfter)
    -                inferShapes__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "infer_shapes"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional inferShapes)
    -                placePrunedGraph__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "place_pruned_graph"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         placePrunedGraph)
    -                enableBfloat16Sendrecv__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "enable_bfloat16_sendrecv"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         enableBfloat16Sendrecv)
    -                timelineStep__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "timeline_step"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timelineStep)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 2, enableRecvScheduling__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, optimizerOptions__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, buildCostModel__field_descriptor),
    -                    (Data.ProtoLens.Tag 9, buildCostModelAfter__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, inferShapes__field_descriptor),
    -                    (Data.ProtoLens.Tag 6, placePrunedGraph__field_descriptor),
    -                    (Data.ProtoLens.Tag 7, enableBfloat16Sendrecv__field_descriptor),
    -                    (Data.ProtoLens.Tag 8, timelineStep__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("enable_recv_scheduling",
    -                     enableRecvScheduling__field_descriptor),
    -                    ("optimizer_options", optimizerOptions__field_descriptor),
    -                    ("build_cost_model", buildCostModel__field_descriptor),
    -                    ("build_cost_model_after", buildCostModelAfter__field_descriptor),
    -                    ("infer_shapes", inferShapes__field_descriptor),
    -                    ("place_pruned_graph", placePrunedGraph__field_descriptor),
    -                    ("enable_bfloat16_sendrecv",
    -                     enableBfloat16Sendrecv__field_descriptor),
    -                    ("timeline_step", timelineStep__field_descriptor)])
    -
    -data OptimizerOptions = OptimizerOptions{_OptimizerOptions'doCommonSubexpressionElimination
    -                                         :: Prelude.Bool,
    -                                         _OptimizerOptions'doConstantFolding :: Prelude.Bool,
    -                                         _OptimizerOptions'doFunctionInlining :: Prelude.Bool,
    -                                         _OptimizerOptions'optLevel :: OptimizerOptions'Level}
    -                      deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "doCommonSubexpressionElimination"
    -       OptimizerOptions
    -     = Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "doCommonSubexpressionElimination"
    -         OptimizerOptions OptimizerOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens
    -              _OptimizerOptions'doCommonSubexpressionElimination
    -              (\ x__ y__ ->
    -                 x__{_OptimizerOptions'doCommonSubexpressionElimination = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "doConstantFolding" OptimizerOptions =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "doConstantFolding"
    -         OptimizerOptions OptimizerOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OptimizerOptions'doConstantFolding
    -              (\ x__ y__ -> x__{_OptimizerOptions'doConstantFolding = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "doFunctionInlining" OptimizerOptions =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "doFunctionInlining"
    -         OptimizerOptions OptimizerOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OptimizerOptions'doFunctionInlining
    -              (\ x__ y__ -> x__{_OptimizerOptions'doFunctionInlining = y__})
    -
    -type instance Data.ProtoLens.Field "optLevel" OptimizerOptions =
    -     OptimizerOptions'Level
    -
    -instance Data.ProtoLens.HasField "optLevel" OptimizerOptions
    -         OptimizerOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _OptimizerOptions'optLevel
    -              (\ x__ y__ -> x__{_OptimizerOptions'optLevel = y__})
    -
    -instance Data.Default.Class.Default OptimizerOptions where
    -        def
    -          = OptimizerOptions{_OptimizerOptions'doCommonSubexpressionElimination
    -                               = Data.ProtoLens.fieldDefault,
    -                             _OptimizerOptions'doConstantFolding = Data.ProtoLens.fieldDefault,
    -                             _OptimizerOptions'doFunctionInlining = Data.ProtoLens.fieldDefault,
    -                             _OptimizerOptions'optLevel = Data.Default.Class.def}
    -
    -instance Data.ProtoLens.Message OptimizerOptions where
    -        descriptor
    -          = let doCommonSubexpressionElimination__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor
    -                      "do_common_subexpression_elimination"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         doCommonSubexpressionElimination)
    -                doConstantFolding__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "do_constant_folding"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         doConstantFolding)
    -                doFunctionInlining__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "do_function_inlining"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         doFunctionInlining)
    -                optLevel__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "opt_level"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor OptimizerOptions'Level)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional optLevel)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1,
    -                     doCommonSubexpressionElimination__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, doConstantFolding__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, doFunctionInlining__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, optLevel__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("do_common_subexpression_elimination",
    -                     doCommonSubexpressionElimination__field_descriptor),
    -                    ("do_constant_folding", doConstantFolding__field_descriptor),
    -                    ("do_function_inlining", doFunctionInlining__field_descriptor),
    -                    ("opt_level", optLevel__field_descriptor)])
    -
    -data OptimizerOptions'Level = OptimizerOptions'L0
    -                            | OptimizerOptions'L1
    -                            deriving (Prelude.Show, Prelude.Eq)
    -
    -instance Data.Default.Class.Default OptimizerOptions'Level where
    -        def = OptimizerOptions'L0
    -
    -instance Data.ProtoLens.FieldDefault OptimizerOptions'Level where
    -        fieldDefault = OptimizerOptions'L0
    -
    -instance Data.ProtoLens.MessageEnum OptimizerOptions'Level where
    -        maybeToEnum (-1) = Prelude.Just OptimizerOptions'L0
    -        maybeToEnum 0 = Prelude.Just OptimizerOptions'L1
    -        maybeToEnum _ = Prelude.Nothing
    -        showEnum OptimizerOptions'L0 = "L0"
    -        showEnum OptimizerOptions'L1 = "L1"
    -        readEnum "L0" = Prelude.Just OptimizerOptions'L0
    -        readEnum "L1" = Prelude.Just OptimizerOptions'L1
    -        readEnum _ = Prelude.Nothing
    -
    -instance Prelude.Enum OptimizerOptions'Level where
    -        toEnum k__
    -          = Prelude.maybe
    -              (Prelude.error
    -                 ((Prelude.++) "toEnum: unknown value for enum Level: "
    -                    (Prelude.show k__)))
    -              Prelude.id
    -              (Data.ProtoLens.maybeToEnum k__)
    -        fromEnum OptimizerOptions'L0 = -1
    -        fromEnum OptimizerOptions'L1 = 0
    -        succ OptimizerOptions'L1
    -          = Prelude.error
    -              "Ident \"OptimizerOptions'Level\".Ident \"succ\": bad argument Ident \"OptimizerOptions'L1\". This value would be out of bounds."
    -        succ OptimizerOptions'L0 = OptimizerOptions'L1
    -        pred OptimizerOptions'L0
    -          = Prelude.error
    -              "Ident \"OptimizerOptions'Level\".Ident \"pred\": bad argument Ident \"OptimizerOptions'L0\". This value would be out of bounds."
    -        pred OptimizerOptions'L1 = OptimizerOptions'L0
    -        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
    -        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
    -        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
    -        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
    -
    -instance Prelude.Bounded OptimizerOptions'Level where
    -        minBound = OptimizerOptions'L0
    -        maxBound = OptimizerOptions'L1
    -
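[Editor's note] Note the wire numbers here: config.proto declares L1 = 0 and L0 = -1, which is why fromEnum above maps L0 to -1 and why maybeToEnum accepts -1. showEnum and readEnum round-trip the proto names rather than the Haskell constructor names. A short sketch:

    import Data.ProtoLens (maybeToEnum, readEnum, showEnum)
    import Proto.Tensorflow.Core.Protobuf.Config (OptimizerOptions'Level(..))

    fromWire :: Maybe OptimizerOptions'Level
    fromWire = maybeToEnum (-1)           -- Just OptimizerOptions'L0

    asText :: String
    asText = showEnum OptimizerOptions'L1 -- "L1", not "OptimizerOptions'L1"

    parsed :: Maybe OptimizerOptions'Level
    parsed = readEnum "L0"                -- Just OptimizerOptions'L0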
    -data RunMetadata = RunMetadata{_RunMetadata'stepStats ::
    -                               Prelude.Maybe Proto.Tensorflow.Core.Framework.StepStats.StepStats,
    -                               _RunMetadata'costGraph ::
    -                               Prelude.Maybe
    -                                 Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef,
    -                               _RunMetadata'partitionGraphs ::
    -                               [Proto.Tensorflow.Core.Framework.Graph.GraphDef]}
    -                 deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "stepStats" RunMetadata =
    -     Proto.Tensorflow.Core.Framework.StepStats.StepStats
    -
    -instance Data.ProtoLens.HasField "stepStats" RunMetadata
    -         RunMetadata where
    -        field _
    -          = (Prelude..) maybe'stepStats
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'stepStats" RunMetadata =
    -     Prelude.Maybe Proto.Tensorflow.Core.Framework.StepStats.StepStats
    -
    -instance Data.ProtoLens.HasField "maybe'stepStats" RunMetadata
    -         RunMetadata where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunMetadata'stepStats
    -              (\ x__ y__ -> x__{_RunMetadata'stepStats = y__})
    -
    -type instance Data.ProtoLens.Field "costGraph" RunMetadata =
    -     Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef
    -
    -instance Data.ProtoLens.HasField "costGraph" RunMetadata
    -         RunMetadata where
    -        field _
    -          = (Prelude..) maybe'costGraph
    -              (Data.ProtoLens.maybeLens Data.Default.Class.def)
    -
    -type instance Data.ProtoLens.Field "maybe'costGraph" RunMetadata =
    -     Prelude.Maybe
    -       Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef
    -
    -instance Data.ProtoLens.HasField "maybe'costGraph" RunMetadata
    -         RunMetadata where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunMetadata'costGraph
    -              (\ x__ y__ -> x__{_RunMetadata'costGraph = y__})
    -
    -type instance Data.ProtoLens.Field "partitionGraphs" RunMetadata =
    -     [Proto.Tensorflow.Core.Framework.Graph.GraphDef]
    -
    -instance Data.ProtoLens.HasField "partitionGraphs" RunMetadata
    -         RunMetadata where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunMetadata'partitionGraphs
    -              (\ x__ y__ -> x__{_RunMetadata'partitionGraphs = y__})
    -
    -instance Data.Default.Class.Default RunMetadata where
    -        def
    -          = RunMetadata{_RunMetadata'stepStats = Prelude.Nothing,
    -                        _RunMetadata'costGraph = Prelude.Nothing,
    -                        _RunMetadata'partitionGraphs = []}
    -
    -instance Data.ProtoLens.Message RunMetadata where
    -        descriptor
    -          = let stepStats__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "step_stats"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.StepStats.StepStats)
    -                      (Data.ProtoLens.OptionalField maybe'stepStats)
    -                costGraph__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "cost_graph"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.CostGraph.CostGraphDef)
    -                      (Data.ProtoLens.OptionalField maybe'costGraph)
    -                partitionGraphs__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "partition_graphs"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor
    -                           Proto.Tensorflow.Core.Framework.Graph.GraphDef)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    -                         partitionGraphs)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, stepStats__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, costGraph__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, partitionGraphs__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("step_stats", stepStats__field_descriptor),
    -                    ("cost_graph", costGraph__field_descriptor),
    -                    ("partition_graphs", partitionGraphs__field_descriptor)])
    -
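The pair of accessors above is proto-lens's optional-field convention: `stepStats` composes `maybe'stepStats` with `maybeLens def`, so reading an unset field yields a default `StepStats`, while `maybe'stepStats` exposes the underlying `Maybe`. A minimal usage sketch, assuming only this module's exports (the helper name `summarize` is illustrative, not part of the generated API):

import Data.Maybe (isJust)
import Lens.Family2 ((^.))
import Proto.Tensorflow.Core.Protobuf.Config

-- Illustrative helper: was step_stats actually present on the wire,
-- and how many partition GraphDefs did the run produce?
summarize :: RunMetadata -> (Bool, Int)
summarize md =
  ( isJust (md ^. maybe'stepStats)
  , length (md ^. partitionGraphs)
  )
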
    -data RunOptions = RunOptions{_RunOptions'traceLevel ::
    -                             RunOptions'TraceLevel,
    -                             _RunOptions'timeoutInMs :: Data.Int.Int64,
    -                             _RunOptions'interOpThreadPool :: Data.Int.Int32,
    -                             _RunOptions'debugTensorWatchOpts :: [DebugTensorWatch],
    -                             _RunOptions'outputPartitionGraphs :: Prelude.Bool}
    -                deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance Data.ProtoLens.Field "traceLevel" RunOptions =
    -     RunOptions'TraceLevel
    -
    -instance Data.ProtoLens.HasField "traceLevel" RunOptions RunOptions
    -         where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunOptions'traceLevel
    -              (\ x__ y__ -> x__{_RunOptions'traceLevel = y__})
    -
    -type instance Data.ProtoLens.Field "timeoutInMs" RunOptions =
    -     Data.Int.Int64
    -
    -instance Data.ProtoLens.HasField "timeoutInMs" RunOptions
    -         RunOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunOptions'timeoutInMs
    -              (\ x__ y__ -> x__{_RunOptions'timeoutInMs = y__})
    -
    -type instance Data.ProtoLens.Field "interOpThreadPool" RunOptions =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "interOpThreadPool" RunOptions
    -         RunOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunOptions'interOpThreadPool
    -              (\ x__ y__ -> x__{_RunOptions'interOpThreadPool = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "debugTensorWatchOpts" RunOptions =
    -     [DebugTensorWatch]
    -
    -instance Data.ProtoLens.HasField "debugTensorWatchOpts" RunOptions
    -         RunOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunOptions'debugTensorWatchOpts
    -              (\ x__ y__ -> x__{_RunOptions'debugTensorWatchOpts = y__})
    -
    -type instance
    -     Data.ProtoLens.Field "outputPartitionGraphs" RunOptions =
    -     Prelude.Bool
    -
    -instance Data.ProtoLens.HasField "outputPartitionGraphs" RunOptions
    -         RunOptions where
    -        field _
    -          = Lens.Family2.Unchecked.lens _RunOptions'outputPartitionGraphs
    -              (\ x__ y__ -> x__{_RunOptions'outputPartitionGraphs = y__})
    -
    -instance Data.Default.Class.Default RunOptions where
    -        def
    -          = RunOptions{_RunOptions'traceLevel = Data.Default.Class.def,
    -                       _RunOptions'timeoutInMs = Data.ProtoLens.fieldDefault,
    -                       _RunOptions'interOpThreadPool = Data.ProtoLens.fieldDefault,
    -                       _RunOptions'debugTensorWatchOpts = [],
    -                       _RunOptions'outputPartitionGraphs = Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message RunOptions where
    -        descriptor
    -          = let traceLevel__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "trace_level"
    -                      (Data.ProtoLens.EnumField ::
    -                         Data.ProtoLens.FieldTypeDescriptor RunOptions'TraceLevel)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional traceLevel)
    -                timeoutInMs__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "timeout_in_ms"
    -                      (Data.ProtoLens.Int64Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int64)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional timeoutInMs)
    -                interOpThreadPool__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "inter_op_thread_pool"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         interOpThreadPool)
    -                debugTensorWatchOpts__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "debug_tensor_watch_opts"
    -                      (Data.ProtoLens.MessageField ::
    -                         Data.ProtoLens.FieldTypeDescriptor DebugTensorWatch)
    -                      (Data.ProtoLens.RepeatedField Data.ProtoLens.Unpacked
    -                         debugTensorWatchOpts)
    -                outputPartitionGraphs__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "output_partition_graphs"
    -                      (Data.ProtoLens.BoolField ::
    -                         Data.ProtoLens.FieldTypeDescriptor Prelude.Bool)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional
    -                         outputPartitionGraphs)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, traceLevel__field_descriptor),
    -                    (Data.ProtoLens.Tag 2, timeoutInMs__field_descriptor),
    -                    (Data.ProtoLens.Tag 3, interOpThreadPool__field_descriptor),
    -                    (Data.ProtoLens.Tag 4, debugTensorWatchOpts__field_descriptor),
    -                    (Data.ProtoLens.Tag 5, outputPartitionGraphs__field_descriptor)])
    -                (Data.Map.fromList
    -                   [("trace_level", traceLevel__field_descriptor),
    -                    ("timeout_in_ms", timeoutInMs__field_descriptor),
    -                    ("inter_op_thread_pool", interOpThreadPool__field_descriptor),
    -                    ("debug_tensor_watch_opts",
    -                     debugTensorWatchOpts__field_descriptor),
    -                    ("output_partition_graphs",
    -                     outputPartitionGraphs__field_descriptor)])
    -
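Construction typically starts from the `Default` instance above and sets fields with the lenses defined near the end of this module; the `RunOptions'TraceLevel` enum used here is defined just below. A hedged sketch (the binding name `tracingOpts` is illustrative):

import Data.Default.Class (def)
import Data.Function ((&))
import Lens.Family2 ((.~))
import Proto.Tensorflow.Core.Protobuf.Config

-- Illustrative RunOptions: full tracing with a 5000 ms timeout.
tracingOpts :: RunOptions
tracingOpts =
  def & traceLevel  .~ RunOptions'FULL_TRACE
      & timeoutInMs .~ 5000

Fields left untouched keep the defaults shown in the `Default` instance above.
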
    -data RunOptions'TraceLevel = RunOptions'NO_TRACE
    -                           | RunOptions'SOFTWARE_TRACE
    -                           | RunOptions'HARDWARE_TRACE
    -                           | RunOptions'FULL_TRACE
    -                           deriving (Prelude.Show, Prelude.Eq)
    -
    -instance Data.Default.Class.Default RunOptions'TraceLevel where
    -        def = RunOptions'NO_TRACE
    -
    -instance Data.ProtoLens.FieldDefault RunOptions'TraceLevel where
    -        fieldDefault = RunOptions'NO_TRACE
    -
    -instance Data.ProtoLens.MessageEnum RunOptions'TraceLevel where
    -        maybeToEnum 0 = Prelude.Just RunOptions'NO_TRACE
    -        maybeToEnum 1 = Prelude.Just RunOptions'SOFTWARE_TRACE
    -        maybeToEnum 2 = Prelude.Just RunOptions'HARDWARE_TRACE
    -        maybeToEnum 3 = Prelude.Just RunOptions'FULL_TRACE
    -        maybeToEnum _ = Prelude.Nothing
    -        showEnum RunOptions'NO_TRACE = "NO_TRACE"
    -        showEnum RunOptions'SOFTWARE_TRACE = "SOFTWARE_TRACE"
    -        showEnum RunOptions'HARDWARE_TRACE = "HARDWARE_TRACE"
    -        showEnum RunOptions'FULL_TRACE = "FULL_TRACE"
    -        readEnum "NO_TRACE" = Prelude.Just RunOptions'NO_TRACE
    -        readEnum "SOFTWARE_TRACE" = Prelude.Just RunOptions'SOFTWARE_TRACE
    -        readEnum "HARDWARE_TRACE" = Prelude.Just RunOptions'HARDWARE_TRACE
    -        readEnum "FULL_TRACE" = Prelude.Just RunOptions'FULL_TRACE
    -        readEnum _ = Prelude.Nothing
    -
    -instance Prelude.Enum RunOptions'TraceLevel where
    -        toEnum k__
    -          = Prelude.maybe
    -              (Prelude.error
    -                 ((Prelude.++) "toEnum: unknown value for enum TraceLevel: "
    -                    (Prelude.show k__)))
    -              Prelude.id
    -              (Data.ProtoLens.maybeToEnum k__)
    -        fromEnum RunOptions'NO_TRACE = 0
    -        fromEnum RunOptions'SOFTWARE_TRACE = 1
    -        fromEnum RunOptions'HARDWARE_TRACE = 2
    -        fromEnum RunOptions'FULL_TRACE = 3
    -        succ RunOptions'FULL_TRACE
    -          = Prelude.error
    -              "Ident \"RunOptions'TraceLevel\".Ident \"succ\": bad argument Ident \"RunOptions'FULL_TRACE\". This value would be out of bounds."
    -        succ RunOptions'NO_TRACE = RunOptions'SOFTWARE_TRACE
    -        succ RunOptions'SOFTWARE_TRACE = RunOptions'HARDWARE_TRACE
    -        succ RunOptions'HARDWARE_TRACE = RunOptions'FULL_TRACE
    -        pred RunOptions'NO_TRACE
    -          = Prelude.error
    -              "Ident \"RunOptions'TraceLevel\".Ident \"pred\": bad argument Ident \"RunOptions'NO_TRACE\". This value would be out of bounds."
    -        pred RunOptions'SOFTWARE_TRACE = RunOptions'NO_TRACE
    -        pred RunOptions'HARDWARE_TRACE = RunOptions'SOFTWARE_TRACE
    -        pred RunOptions'FULL_TRACE = RunOptions'HARDWARE_TRACE
    -        enumFrom = Data.ProtoLens.Message.Enum.messageEnumFrom
    -        enumFromTo = Data.ProtoLens.Message.Enum.messageEnumFromTo
    -        enumFromThen = Data.ProtoLens.Message.Enum.messageEnumFromThen
    -        enumFromThenTo = Data.ProtoLens.Message.Enum.messageEnumFromThenTo
    -
    -instance Prelude.Bounded RunOptions'TraceLevel where
    -        minBound = RunOptions'NO_TRACE
    -        maxBound = RunOptions'FULL_TRACE
    -
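The `MessageEnum` instance above is the hook the wire and text formats go through: `maybeToEnum` is the total counterpart of `toEnum`, and `showEnum`/`readEnum` use the protobuf names. A small sketch of the expected round trips, assuming `Data.ProtoLens` re-exports these class methods (the binding `enumRoundTrips` is illustrative):

import Data.ProtoLens (maybeToEnum, readEnum, showEnum)
import Proto.Tensorflow.Core.Protobuf.Config

-- Illustrative checks; each element should evaluate to True.
enumRoundTrips :: [Bool]
enumRoundTrips =
  [ maybeToEnum 3 == Just RunOptions'FULL_TRACE
  , readEnum (showEnum RunOptions'HARDWARE_TRACE) == Just RunOptions'HARDWARE_TRACE
  , (maybeToEnum 42 :: Maybe RunOptions'TraceLevel) == Nothing
  ]
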
    -data ThreadPoolOptionProto = ThreadPoolOptionProto{_ThreadPoolOptionProto'numThreads
    -                                                   :: Data.Int.Int32}
    -                           deriving (Prelude.Show, Prelude.Eq)
    -
    -type instance
    -     Data.ProtoLens.Field "numThreads" ThreadPoolOptionProto =
    -     Data.Int.Int32
    -
    -instance Data.ProtoLens.HasField "numThreads" ThreadPoolOptionProto
    -         ThreadPoolOptionProto where
    -        field _
    -          = Lens.Family2.Unchecked.lens _ThreadPoolOptionProto'numThreads
    -              (\ x__ y__ -> x__{_ThreadPoolOptionProto'numThreads = y__})
    -
    -instance Data.Default.Class.Default ThreadPoolOptionProto where
    -        def
    -          = ThreadPoolOptionProto{_ThreadPoolOptionProto'numThreads =
    -                                    Data.ProtoLens.fieldDefault}
    -
    -instance Data.ProtoLens.Message ThreadPoolOptionProto where
    -        descriptor
    -          = let numThreads__field_descriptor
    -                  = Data.ProtoLens.FieldDescriptor "num_threads"
    -                      (Data.ProtoLens.Int32Field ::
    -                         Data.ProtoLens.FieldTypeDescriptor Data.Int.Int32)
    -                      (Data.ProtoLens.PlainField Data.ProtoLens.Optional numThreads)
    -              in
    -              Data.ProtoLens.MessageDescriptor
    -                (Data.Map.fromList
    -                   [(Data.ProtoLens.Tag 1, numThreads__field_descriptor)])
    -                (Data.Map.fromList [("num_threads", numThreads__field_descriptor)])
    -
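`ThreadPoolOptionProto` values usually end up in `ConfigProto`'s repeated `session_inter_op_thread_pool` field, whose lens appears below. A sketch with illustrative pool sizes (the binding `twoPools` is made up for this example):

import Data.Default.Class (def)
import Data.Function ((&))
import Lens.Family2 ((.~))
import Proto.Tensorflow.Core.Protobuf.Config

-- Illustrative ConfigProto carrying two session thread pools;
-- the inner defs are inferred as ThreadPoolOptionProto.
twoPools :: ConfigProto
twoPools =
  def & sessionInterOpThreadPool .~
        [ def & numThreads .~ 4
        , def & numThreads .~ 1
        ]
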
    -allocatorType ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "allocatorType" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "allocatorType" msg)
    -                  (Data.ProtoLens.Field "allocatorType" msg')
    -allocatorType
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allocatorType")
    -
    -allowGrowth ::
    -            forall msg msg' . Data.ProtoLens.HasField "allowGrowth" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "allowGrowth" msg)
    -                (Data.ProtoLens.Field "allowGrowth" msg')
    -allowGrowth
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "allowGrowth")
    -
    -allowSoftPlacement ::
    -                   forall msg msg' .
    -                     Data.ProtoLens.HasField "allowSoftPlacement" msg msg' =>
    -                     Lens.Family2.Lens msg msg'
    -                       (Data.ProtoLens.Field "allowSoftPlacement" msg)
    -                       (Data.ProtoLens.Field "allowSoftPlacement" msg')
    -allowSoftPlacement
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "allowSoftPlacement")
    -
    -buildCostModel ::
    -               forall msg msg' .
    -                 Data.ProtoLens.HasField "buildCostModel" msg msg' =>
    -                 Lens.Family2.Lens msg msg'
    -                   (Data.ProtoLens.Field "buildCostModel" msg)
    -                   (Data.ProtoLens.Field "buildCostModel" msg')
    -buildCostModel
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "buildCostModel")
    -
    -buildCostModelAfter ::
    -                    forall msg msg' .
    -                      Data.ProtoLens.HasField "buildCostModelAfter" msg msg' =>
    -                      Lens.Family2.Lens msg msg'
    -                        (Data.ProtoLens.Field "buildCostModelAfter" msg)
    -                        (Data.ProtoLens.Field "buildCostModelAfter" msg')
    -buildCostModelAfter
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "buildCostModelAfter")
    -
    -costGraph ::
    -          forall msg msg' . Data.ProtoLens.HasField "costGraph" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "costGraph" msg)
    -              (Data.ProtoLens.Field "costGraph" msg')
    -costGraph
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "costGraph")
    -
    -debugOps ::
    -         forall msg msg' . Data.ProtoLens.HasField "debugOps" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "debugOps" msg)
    -             (Data.ProtoLens.Field "debugOps" msg')
    -debugOps
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "debugOps")
    -
    -debugTensorWatchOpts ::
    -                     forall msg msg' .
    -                       Data.ProtoLens.HasField "debugTensorWatchOpts" msg msg' =>
    -                       Lens.Family2.Lens msg msg'
    -                         (Data.ProtoLens.Field "debugTensorWatchOpts" msg)
    -                         (Data.ProtoLens.Field "debugTensorWatchOpts" msg')
    -debugTensorWatchOpts
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "debugTensorWatchOpts")
    -
    -debugUrls ::
    -          forall msg msg' . Data.ProtoLens.HasField "debugUrls" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "debugUrls" msg)
    -              (Data.ProtoLens.Field "debugUrls" msg')
    -debugUrls
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "debugUrls")
    -
    -deferredDeletionBytes ::
    -                      forall msg msg' .
    -                        Data.ProtoLens.HasField "deferredDeletionBytes" msg msg' =>
    -                        Lens.Family2.Lens msg msg'
    -                          (Data.ProtoLens.Field "deferredDeletionBytes" msg)
    -                          (Data.ProtoLens.Field "deferredDeletionBytes" msg')
    -deferredDeletionBytes
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "deferredDeletionBytes")
    -
    -deviceCount ::
    -            forall msg msg' . Data.ProtoLens.HasField "deviceCount" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "deviceCount" msg)
    -                (Data.ProtoLens.Field "deviceCount" msg')
    -deviceCount
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "deviceCount")
    -
    -deviceFilters ::
    -              forall msg msg' .
    -                Data.ProtoLens.HasField "deviceFilters" msg msg' =>
    -                Lens.Family2.Lens msg msg'
    -                  (Data.ProtoLens.Field "deviceFilters" msg)
    -                  (Data.ProtoLens.Field "deviceFilters" msg')
    -deviceFilters
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "deviceFilters")
    -
    -doCommonSubexpressionElimination ::
    -                                 forall msg msg' .
    -                                   Data.ProtoLens.HasField "doCommonSubexpressionElimination" msg
    -                                     msg' =>
    -                                   Lens.Family2.Lens msg msg'
    -                                     (Data.ProtoLens.Field "doCommonSubexpressionElimination" msg)
    -                                     (Data.ProtoLens.Field "doCommonSubexpressionElimination" msg')
    -doCommonSubexpressionElimination
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "doCommonSubexpressionElimination")
    -
    -doConstantFolding ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "doConstantFolding" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "doConstantFolding" msg)
    -                      (Data.ProtoLens.Field "doConstantFolding" msg')
    -doConstantFolding
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "doConstantFolding")
    -
    -doFunctionInlining ::
    -                   forall msg msg' .
    -                     Data.ProtoLens.HasField "doFunctionInlining" msg msg' =>
    -                     Lens.Family2.Lens msg msg'
    -                       (Data.ProtoLens.Field "doFunctionInlining" msg)
    -                       (Data.ProtoLens.Field "doFunctionInlining" msg')
    -doFunctionInlining
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "doFunctionInlining")
    -
    -enableBfloat16Sendrecv ::
    -                       forall msg msg' .
    -                         Data.ProtoLens.HasField "enableBfloat16Sendrecv" msg msg' =>
    -                         Lens.Family2.Lens msg msg'
    -                           (Data.ProtoLens.Field "enableBfloat16Sendrecv" msg)
    -                           (Data.ProtoLens.Field "enableBfloat16Sendrecv" msg')
    -enableBfloat16Sendrecv
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "enableBfloat16Sendrecv")
    -
    -enableRecvScheduling ::
    -                     forall msg msg' .
    -                       Data.ProtoLens.HasField "enableRecvScheduling" msg msg' =>
    -                       Lens.Family2.Lens msg msg'
    -                         (Data.ProtoLens.Field "enableRecvScheduling" msg)
    -                         (Data.ProtoLens.Field "enableRecvScheduling" msg')
    -enableRecvScheduling
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "enableRecvScheduling")
    -
    -gpuOptions ::
    -           forall msg msg' . Data.ProtoLens.HasField "gpuOptions" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "gpuOptions" msg)
    -               (Data.ProtoLens.Field "gpuOptions" msg')
    -gpuOptions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "gpuOptions")
    -
    -graphOptions ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "graphOptions" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "graphOptions" msg)
    -                 (Data.ProtoLens.Field "graphOptions" msg')
    -graphOptions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "graphOptions")
    -
    -inferShapes ::
    -            forall msg msg' . Data.ProtoLens.HasField "inferShapes" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "inferShapes" msg)
    -                (Data.ProtoLens.Field "inferShapes" msg')
    -inferShapes
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "inferShapes")
    -
    -interOpParallelismThreads ::
    -                          forall msg msg' .
    -                            Data.ProtoLens.HasField "interOpParallelismThreads" msg msg' =>
    -                            Lens.Family2.Lens msg msg'
    -                              (Data.ProtoLens.Field "interOpParallelismThreads" msg)
    -                              (Data.ProtoLens.Field "interOpParallelismThreads" msg')
    -interOpParallelismThreads
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "interOpParallelismThreads")
    -
    -interOpThreadPool ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "interOpThreadPool" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "interOpThreadPool" msg)
    -                      (Data.ProtoLens.Field "interOpThreadPool" msg')
    -interOpThreadPool
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "interOpThreadPool")
    -
    -intraOpParallelismThreads ::
    -                          forall msg msg' .
    -                            Data.ProtoLens.HasField "intraOpParallelismThreads" msg msg' =>
    -                            Lens.Family2.Lens msg msg'
    -                              (Data.ProtoLens.Field "intraOpParallelismThreads" msg)
    -                              (Data.ProtoLens.Field "intraOpParallelismThreads" msg')
    -intraOpParallelismThreads
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "intraOpParallelismThreads")
    -
    -key ::
    -    forall msg msg' . Data.ProtoLens.HasField "key" msg msg' =>
    -      Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "key" msg)
    -        (Data.ProtoLens.Field "key" msg')
    -key
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "key")
    -
    -logDevicePlacement ::
    -                   forall msg msg' .
    -                     Data.ProtoLens.HasField "logDevicePlacement" msg msg' =>
    -                     Lens.Family2.Lens msg msg'
    -                       (Data.ProtoLens.Field "logDevicePlacement" msg)
    -                       (Data.ProtoLens.Field "logDevicePlacement" msg')
    -logDevicePlacement
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "logDevicePlacement")
    -
    -maybe'costGraph ::
    -                forall msg msg' .
    -                  Data.ProtoLens.HasField "maybe'costGraph" msg msg' =>
    -                  Lens.Family2.Lens msg msg'
    -                    (Data.ProtoLens.Field "maybe'costGraph" msg)
    -                    (Data.ProtoLens.Field "maybe'costGraph" msg')
    -maybe'costGraph
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'costGraph")
    -
    -maybe'gpuOptions ::
    -                 forall msg msg' .
    -                   Data.ProtoLens.HasField "maybe'gpuOptions" msg msg' =>
    -                   Lens.Family2.Lens msg msg'
    -                     (Data.ProtoLens.Field "maybe'gpuOptions" msg)
    -                     (Data.ProtoLens.Field "maybe'gpuOptions" msg')
    -maybe'gpuOptions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'gpuOptions")
    -
    -maybe'graphOptions ::
    -                   forall msg msg' .
    -                     Data.ProtoLens.HasField "maybe'graphOptions" msg msg' =>
    -                     Lens.Family2.Lens msg msg'
    -                       (Data.ProtoLens.Field "maybe'graphOptions" msg)
    -                       (Data.ProtoLens.Field "maybe'graphOptions" msg')
    -maybe'graphOptions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'graphOptions")
    -
    -maybe'optimizerOptions ::
    -                       forall msg msg' .
    -                         Data.ProtoLens.HasField "maybe'optimizerOptions" msg msg' =>
    -                         Lens.Family2.Lens msg msg'
    -                           (Data.ProtoLens.Field "maybe'optimizerOptions" msg)
    -                           (Data.ProtoLens.Field "maybe'optimizerOptions" msg')
    -maybe'optimizerOptions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'optimizerOptions")
    -
    -maybe'stepStats ::
    -                forall msg msg' .
    -                  Data.ProtoLens.HasField "maybe'stepStats" msg msg' =>
    -                  Lens.Family2.Lens msg msg'
    -                    (Data.ProtoLens.Field "maybe'stepStats" msg)
    -                    (Data.ProtoLens.Field "maybe'stepStats" msg')
    -maybe'stepStats
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "maybe'stepStats")
    -
    -nodeName ::
    -         forall msg msg' . Data.ProtoLens.HasField "nodeName" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "nodeName" msg)
    -             (Data.ProtoLens.Field "nodeName" msg')
    -nodeName
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "nodeName")
    -
    -numThreads ::
    -           forall msg msg' . Data.ProtoLens.HasField "numThreads" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "numThreads" msg)
    -               (Data.ProtoLens.Field "numThreads" msg')
    -numThreads
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "numThreads")
    -
    -operationTimeoutInMs ::
    -                     forall msg msg' .
    -                       Data.ProtoLens.HasField "operationTimeoutInMs" msg msg' =>
    -                       Lens.Family2.Lens msg msg'
    -                         (Data.ProtoLens.Field "operationTimeoutInMs" msg)
    -                         (Data.ProtoLens.Field "operationTimeoutInMs" msg')
    -operationTimeoutInMs
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "operationTimeoutInMs")
    -
    -optLevel ::
    -         forall msg msg' . Data.ProtoLens.HasField "optLevel" msg msg' =>
    -           Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "optLevel" msg)
    -             (Data.ProtoLens.Field "optLevel" msg')
    -optLevel
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "optLevel")
    -
    -optimizerOptions ::
    -                 forall msg msg' .
    -                   Data.ProtoLens.HasField "optimizerOptions" msg msg' =>
    -                   Lens.Family2.Lens msg msg'
    -                     (Data.ProtoLens.Field "optimizerOptions" msg)
    -                     (Data.ProtoLens.Field "optimizerOptions" msg')
    -optimizerOptions
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "optimizerOptions")
    -
    -outputPartitionGraphs ::
    -                      forall msg msg' .
    -                        Data.ProtoLens.HasField "outputPartitionGraphs" msg msg' =>
    -                        Lens.Family2.Lens msg msg'
    -                          (Data.ProtoLens.Field "outputPartitionGraphs" msg)
    -                          (Data.ProtoLens.Field "outputPartitionGraphs" msg')
    -outputPartitionGraphs
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "outputPartitionGraphs")
    -
    -outputSlot ::
    -           forall msg msg' . Data.ProtoLens.HasField "outputSlot" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "outputSlot" msg)
    -               (Data.ProtoLens.Field "outputSlot" msg')
    -outputSlot
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "outputSlot")
    -
    -partitionGraphs ::
    -                forall msg msg' .
    -                  Data.ProtoLens.HasField "partitionGraphs" msg msg' =>
    -                  Lens.Family2.Lens msg msg'
    -                    (Data.ProtoLens.Field "partitionGraphs" msg)
    -                    (Data.ProtoLens.Field "partitionGraphs" msg')
    -partitionGraphs
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "partitionGraphs")
    -
    -perProcessGpuMemoryFraction ::
    -                            forall msg msg' .
    -                              Data.ProtoLens.HasField "perProcessGpuMemoryFraction" msg msg' =>
    -                              Lens.Family2.Lens msg msg'
    -                                (Data.ProtoLens.Field "perProcessGpuMemoryFraction" msg)
    -                                (Data.ProtoLens.Field "perProcessGpuMemoryFraction" msg')
    -perProcessGpuMemoryFraction
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "perProcessGpuMemoryFraction")
    -
    -placePrunedGraph ::
    -                 forall msg msg' .
    -                   Data.ProtoLens.HasField "placePrunedGraph" msg msg' =>
    -                   Lens.Family2.Lens msg msg'
    -                     (Data.ProtoLens.Field "placePrunedGraph" msg)
    -                     (Data.ProtoLens.Field "placePrunedGraph" msg')
    -placePrunedGraph
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "placePrunedGraph")
    -
    -placementPeriod ::
    -                forall msg msg' .
    -                  Data.ProtoLens.HasField "placementPeriod" msg msg' =>
    -                  Lens.Family2.Lens msg msg'
    -                    (Data.ProtoLens.Field "placementPeriod" msg)
    -                    (Data.ProtoLens.Field "placementPeriod" msg')
    -placementPeriod
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "placementPeriod")
    -
    -sessionInterOpThreadPool ::
    -                         forall msg msg' .
    -                           Data.ProtoLens.HasField "sessionInterOpThreadPool" msg msg' =>
    -                           Lens.Family2.Lens msg msg'
    -                             (Data.ProtoLens.Field "sessionInterOpThreadPool" msg)
    -                             (Data.ProtoLens.Field "sessionInterOpThreadPool" msg')
    -sessionInterOpThreadPool
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "sessionInterOpThreadPool")
    -
    -stepStats ::
    -          forall msg msg' . Data.ProtoLens.HasField "stepStats" msg msg' =>
    -            Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "stepStats" msg)
    -              (Data.ProtoLens.Field "stepStats" msg')
    -stepStats
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "stepStats")
    -
    -timelineStep ::
    -             forall msg msg' .
    -               Data.ProtoLens.HasField "timelineStep" msg msg' =>
    -               Lens.Family2.Lens msg msg'
    -                 (Data.ProtoLens.Field "timelineStep" msg)
    -                 (Data.ProtoLens.Field "timelineStep" msg')
    -timelineStep
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "timelineStep")
    -
    -timeoutInMs ::
    -            forall msg msg' . Data.ProtoLens.HasField "timeoutInMs" msg msg' =>
    -              Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "timeoutInMs" msg)
    -                (Data.ProtoLens.Field "timeoutInMs" msg')
    -timeoutInMs
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "timeoutInMs")
    -
    -traceLevel ::
    -           forall msg msg' . Data.ProtoLens.HasField "traceLevel" msg msg' =>
    -             Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "traceLevel" msg)
    -               (Data.ProtoLens.Field "traceLevel" msg')
    -traceLevel
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "traceLevel")
    -
    -usePerSessionThreads ::
    -                     forall msg msg' .
    -                       Data.ProtoLens.HasField "usePerSessionThreads" msg msg' =>
    -                       Lens.Family2.Lens msg msg'
    -                         (Data.ProtoLens.Field "usePerSessionThreads" msg)
    -                         (Data.ProtoLens.Field "usePerSessionThreads" msg')
    -usePerSessionThreads
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "usePerSessionThreads")
    -
    -value ::
    -      forall msg msg' . Data.ProtoLens.HasField "value" msg msg' =>
    -        Lens.Family2.Lens msg msg' (Data.ProtoLens.Field "value" msg)
    -          (Data.ProtoLens.Field "value" msg')
    -value
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym :: Data.ProtoLens.ProxySym "value")
    -
    -visibleDeviceList ::
    -                  forall msg msg' .
    -                    Data.ProtoLens.HasField "visibleDeviceList" msg msg' =>
    -                    Lens.Family2.Lens msg msg'
    -                      (Data.ProtoLens.Field "visibleDeviceList" msg)
    -                      (Data.ProtoLens.Field "visibleDeviceList" msg')
    -visibleDeviceList
    -  = Data.ProtoLens.field
    -      (Data.ProtoLens.ProxySym ::
    -         Data.ProtoLens.ProxySym "visibleDeviceList")
    -
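Each accessor in the block above is a single polymorphic definition, `Data.ProtoLens.field` applied at a type-level symbol, so one name such as `nodeName` serves every message with the corresponding `HasField` instance. A short sketch of that reuse (the helper `watchLabel` is illustrative; `DebugTensorWatch` is the message defined earlier in this module, whose node_name and output_slot fields are assumed to be the string and int32 from debug.proto):

import qualified Data.Text as Text
import Lens.Family2 ((^.))
import Proto.Tensorflow.Core.Protobuf.Config

-- Illustrative: render a watch as "node:slot" via the shared lenses.
watchLabel :: DebugTensorWatch -> Text.Text
watchLabel w =
  Text.concat [w ^. nodeName, Text.pack ":", Text.pack (show (w ^. outputSlot))]
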
-
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-proto-0.1.0.0/src/hscolour.css
deleted file mode 100644
index c15919e..0000000
--- a/docs/haddock/tensorflow-proto-0.1.0.0/src/hscolour.css
+++ /dev/null
@@ -1,5 +0,0 @@
-.hs-keyglyph, .hs-layout {color: red;}
-.hs-keyword {color: blue;}
-.hs-comment, .hs-comment a {color: green;}
-.hs-str, .hs-chr {color: teal;}
-.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}
diff --git a/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt b/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt
index 095759b..b46d1f8 100644
--- a/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt
+++ b/docs/haddock/tensorflow-proto-0.1.0.0/tensorflow-proto.txt
@@ -10,12 +10,12 @@
 
 module Proto.Tensorflow.Core.Framework.ResourceHandle
 data ResourceHandle
-ResourceHandle :: Text -> Text -> Text -> Word64 -> Text -> ResourceHandle
-[_ResourceHandle'device] :: ResourceHandle -> Text
-[_ResourceHandle'container] :: ResourceHandle -> Text
-[_ResourceHandle'name] :: ResourceHandle -> Text
-[_ResourceHandle'hashCode] :: ResourceHandle -> Word64
-[_ResourceHandle'maybeTypeName] :: ResourceHandle -> Text
+ResourceHandle :: !Text -> !Text -> !Text -> !Word64 -> !Text -> ResourceHandle
+[_ResourceHandle'device] :: ResourceHandle -> !Text
+[_ResourceHandle'container] :: ResourceHandle -> !Text
+[_ResourceHandle'name] :: ResourceHandle -> !Text
+[_ResourceHandle'hashCode] :: ResourceHandle -> !Word64
+[_ResourceHandle'maybeTypeName] :: ResourceHandle -> !Text
 container :: HasField "container" msg msg' => Lens msg msg' (Field "container" msg) (Field "container" msg')
 device :: HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg')
 hashCode :: HasField "hashCode" msg msg' => Lens msg msg' (Field "hashCode" msg) (Field "hashCode" msg')
@@ -84,13 +84,13 @@ instance GHC.Enum.Bounded Proto.Tensorflow.Core.Framework.Types.DataType
 
 module Proto.Tensorflow.Core.Framework.TensorShape
 data TensorShapeProto
-TensorShapeProto :: [TensorShapeProto'Dim] -> Bool -> TensorShapeProto
-[_TensorShapeProto'dim] :: TensorShapeProto -> [TensorShapeProto'Dim]
-[_TensorShapeProto'unknownRank] :: TensorShapeProto -> Bool
+TensorShapeProto :: ![TensorShapeProto'Dim] -> !Bool -> TensorShapeProto
+[_TensorShapeProto'dim] :: TensorShapeProto -> ![TensorShapeProto'Dim]
+[_TensorShapeProto'unknownRank] :: TensorShapeProto -> !Bool
 data TensorShapeProto'Dim
-TensorShapeProto'Dim :: Int64 -> Text -> TensorShapeProto'Dim
-[_TensorShapeProto'Dim'size] :: TensorShapeProto'Dim -> Int64
-[_TensorShapeProto'Dim'name] :: TensorShapeProto'Dim -> Text
+TensorShapeProto'Dim :: !Int64 -> !Text -> TensorShapeProto'Dim
+[_TensorShapeProto'Dim'size] :: TensorShapeProto'Dim -> !Int64
+[_TensorShapeProto'Dim'name] :: TensorShapeProto'Dim -> !Text
 dim :: HasField "dim" msg msg' => Lens msg msg' (Field "dim" msg) (Field "dim" msg')
 name :: HasField "name" msg msg' => Lens msg msg' (Field "name" msg) (Field "name" msg')
 size :: HasField "size" msg msg' => Lens msg msg' (Field "size" msg) (Field "size" msg')
@@ -110,21 +110,21 @@ instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.TensorSh
 
 module Proto.Tensorflow.Core.Framework.Tensor
 data TensorProto
-TensorProto :: DataType -> Maybe TensorShapeProto -> Int32 -> ByteString -> [Int32] -> [Float] -> [Double] -> [Int32] -> [ByteString] -> [Float] -> [Int64] -> [Bool] -> [Double] -> [ResourceHandle] -> TensorProto
-[_TensorProto'dtype] :: TensorProto -> DataType
-[_TensorProto'tensorShape] :: TensorProto -> Maybe TensorShapeProto
-[_TensorProto'versionNumber] :: TensorProto -> Int32
-[_TensorProto'tensorContent] :: TensorProto -> ByteString
-[_TensorProto'halfVal] :: TensorProto -> [Int32]
-[_TensorProto'floatVal] :: TensorProto -> [Float]
-[_TensorProto'doubleVal] :: TensorProto -> [Double]
-[_TensorProto'intVal] :: TensorProto -> [Int32]
-[_TensorProto'stringVal] :: TensorProto -> [ByteString]
-[_TensorProto'scomplexVal] :: TensorProto -> [Float]
-[_TensorProto'int64Val] :: TensorProto -> [Int64]
-[_TensorProto'boolVal] :: TensorProto -> [Bool]
-[_TensorProto'dcomplexVal] :: TensorProto -> [Double]
-[_TensorProto'resourceHandleVal] :: TensorProto -> [ResourceHandle]
+TensorProto :: !DataType -> !(Maybe TensorShapeProto) -> !Int32 -> !ByteString -> ![Int32] -> ![Float] -> ![Double] -> ![Int32] -> ![ByteString] -> ![Float] -> ![Int64] -> ![Bool] -> ![Double] -> ![ResourceHandle] -> TensorProto
+[_TensorProto'dtype] :: TensorProto -> !DataType
+[_TensorProto'tensorShape] :: TensorProto -> !(Maybe TensorShapeProto)
+[_TensorProto'versionNumber] :: TensorProto -> !Int32
+[_TensorProto'tensorContent] :: TensorProto -> !ByteString
+[_TensorProto'halfVal] :: TensorProto -> ![Int32]
+[_TensorProto'floatVal] :: TensorProto -> ![Float]
+[_TensorProto'doubleVal] :: TensorProto -> ![Double]
+[_TensorProto'intVal] :: TensorProto -> ![Int32]
+[_TensorProto'stringVal] :: TensorProto -> ![ByteString]
+[_TensorProto'scomplexVal] :: TensorProto -> ![Float]
+[_TensorProto'int64Val] :: TensorProto -> ![Int64]
+[_TensorProto'boolVal] :: TensorProto -> ![Bool]
+[_TensorProto'dcomplexVal] :: TensorProto -> ![Double]
+[_TensorProto'resourceHandleVal] :: TensorProto -> ![ResourceHandle]
 boolVal :: HasField "boolVal" msg msg' => Lens msg msg' (Field "boolVal" msg) (Field "boolVal" msg')
 dcomplexVal :: HasField "dcomplexVal" msg msg' => Lens msg msg' (Field "dcomplexVal" msg) (Field "dcomplexVal" msg')
 doubleVal :: HasField "doubleVal" msg msg' => Lens msg msg' (Field "doubleVal" msg) (Field "doubleVal" msg')
@@ -160,36 +160,279 @@ instance Data.ProtoLens.Field.HasField "resourceHandleVal" Proto.Tensorflow.Core
 instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Tensor.TensorProto
 instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Tensor.TensorProto
+module Proto.Tensorflow.Core.Framework.Summary
+data HistogramProto
+HistogramProto :: !Double -> !Double -> !Double -> !Double -> !Double -> ![Double] -> ![Double] -> HistogramProto
+[_HistogramProto'min] :: HistogramProto -> !Double
+[_HistogramProto'max] :: HistogramProto -> !Double
+[_HistogramProto'num] :: HistogramProto -> !Double
+[_HistogramProto'sum] :: HistogramProto -> !Double
+[_HistogramProto'sumSquares] :: HistogramProto -> !Double
+[_HistogramProto'bucketLimit] :: HistogramProto -> ![Double]
+[_HistogramProto'bucket] :: HistogramProto -> ![Double]
+data Summary
+Summary :: ![Summary'Value] -> Summary
+[_Summary'value] :: Summary -> ![Summary'Value]
+data Summary'Audio
+Summary'Audio :: !Float -> !Int64 -> !Int64 -> !ByteString -> !Text -> Summary'Audio
+[_Summary'Audio'sampleRate] :: Summary'Audio -> !Float
+[_Summary'Audio'numChannels] :: Summary'Audio -> !Int64
+[_Summary'Audio'lengthFrames] :: Summary'Audio -> !Int64
+[_Summary'Audio'encodedAudioString] :: Summary'Audio -> !ByteString
+[_Summary'Audio'contentType] :: Summary'Audio -> !Text
+data Summary'Image
+Summary'Image :: !Int32 -> !Int32 -> !Int32 -> !ByteString -> Summary'Image
+[_Summary'Image'height] :: Summary'Image -> !Int32
+[_Summary'Image'width] :: Summary'Image -> !Int32
+[_Summary'Image'colorspace] :: Summary'Image -> !Int32
+[_Summary'Image'encodedImageString] :: Summary'Image -> !ByteString
+data Summary'Value
+Summary'Value :: !Text -> !Text -> !(Maybe Float) -> !(Maybe ByteString) -> !(Maybe Summary'Image) -> !(Maybe HistogramProto) -> !(Maybe Summary'Audio) -> !(Maybe TensorProto) -> Summary'Value
+[_Summary'Value'nodeName] :: Summary'Value -> !Text
+[_Summary'Value'tag] :: Summary'Value -> !Text
+[_Summary'Value'simpleValue] :: Summary'Value -> !(Maybe Float)
+[_Summary'Value'obsoleteOldStyleHistogram] :: Summary'Value -> !(Maybe ByteString)
+[_Summary'Value'image] :: Summary'Value -> !(Maybe Summary'Image)
+[_Summary'Value'histo] :: Summary'Value -> !(Maybe HistogramProto)
+[_Summary'Value'audio] :: Summary'Value -> !(Maybe Summary'Audio)
+[_Summary'Value'tensor] :: Summary'Value -> !(Maybe TensorProto)
+data SummaryDescription
+SummaryDescription :: !Text -> SummaryDescription
+[_SummaryDescription'typeHint] :: SummaryDescription -> !Text
+audio :: HasField "audio" msg msg' => Lens msg msg' (Field "audio" msg) (Field "audio" msg')
+bucket :: HasField "bucket" msg msg' => Lens msg msg' (Field "bucket" msg) (Field "bucket" msg')
+bucketLimit :: HasField "bucketLimit" msg msg' => Lens msg msg' (Field "bucketLimit" msg) (Field "bucketLimit" msg')
+colorspace :: HasField "colorspace" msg msg' => Lens msg msg' (Field "colorspace" msg) (Field "colorspace" msg')
+contentType :: HasField "contentType" msg msg' => Lens msg msg' (Field "contentType" msg) (Field "contentType" msg')
+encodedAudioString :: HasField "encodedAudioString" msg msg' => Lens msg msg' (Field "encodedAudioString" msg) (Field "encodedAudioString" msg')
+encodedImageString :: HasField "encodedImageString" msg msg' => Lens msg msg' (Field "encodedImageString" msg) (Field "encodedImageString" msg')
+height :: HasField "height" msg msg' => Lens msg msg' (Field "height" msg) (Field "height" msg')
+histo :: HasField "histo" msg msg' => Lens msg msg' (Field "histo" msg) (Field "histo" msg')
+image :: HasField "image" msg msg' => Lens msg msg' (Field "image" msg) (Field "image" msg')
+lengthFrames :: HasField "lengthFrames" msg msg' => Lens msg msg' (Field "lengthFrames" msg) (Field "lengthFrames" msg')
+max :: HasField "max" msg msg' => Lens msg msg' (Field "max" msg) (Field "max" msg')
+maybe'audio :: HasField "maybe'audio" msg msg' => Lens msg msg' (Field "maybe'audio" msg) (Field "maybe'audio" msg')
+maybe'histo :: HasField "maybe'histo" msg msg' => Lens msg msg' (Field "maybe'histo" msg) (Field "maybe'histo" msg')
+maybe'image :: HasField "maybe'image" msg msg' => Lens msg msg' (Field "maybe'image" msg) (Field "maybe'image" msg')
+maybe'obsoleteOldStyleHistogram :: HasField "maybe'obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "maybe'obsoleteOldStyleHistogram" msg) (Field "maybe'obsoleteOldStyleHistogram" msg')
+maybe'simpleValue :: HasField "maybe'simpleValue" msg msg' => Lens msg msg' (Field "maybe'simpleValue" msg) (Field "maybe'simpleValue" msg')
+maybe'tensor :: HasField "maybe'tensor" msg msg' => Lens msg msg' (Field "maybe'tensor" msg) (Field "maybe'tensor" msg')
+min :: HasField "min" msg msg' => Lens msg msg' (Field "min" msg) (Field "min" msg')
+nodeName :: HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg')
+num :: HasField "num" msg msg' => Lens msg msg' (Field "num" msg) (Field "num" msg')
+numChannels :: HasField "numChannels" msg msg' => Lens msg msg' (Field "numChannels" msg) (Field "numChannels" msg')
+obsoleteOldStyleHistogram :: HasField "obsoleteOldStyleHistogram" msg msg' => Lens msg msg' (Field "obsoleteOldStyleHistogram" msg) (Field "obsoleteOldStyleHistogram" msg')
+sampleRate :: HasField "sampleRate" msg msg' => Lens msg msg' (Field "sampleRate" msg) (Field "sampleRate" msg')
+simpleValue :: HasField "simpleValue" msg msg' => Lens msg msg' (Field "simpleValue" msg) (Field "simpleValue" msg')
+sum :: HasField "sum" msg msg' => Lens msg msg' (Field "sum" msg) (Field "sum" msg')
+sumSquares :: HasField "sumSquares" msg msg' => Lens msg msg' (Field "sumSquares" msg) (Field "sumSquares" msg')
+tag :: HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg')
+tensor :: HasField "tensor" msg msg' => Lens msg msg' (Field "tensor" msg) (Field "tensor" msg')
+typeHint :: HasField "typeHint" msg msg' => Lens msg msg' (Field "typeHint" msg) (Field "typeHint" msg')
+value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg')
+width :: HasField "width" msg msg' => Lens msg msg' (Field "width" msg) (Field "width" msg')
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.SummaryDescription
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.SummaryDescription
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance GHC.Show.Show Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "min" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "max" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "num" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "sum" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "sumSquares" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "bucketLimit" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "bucket" Proto.Tensorflow.Core.Framework.Summary.HistogramProto Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.HistogramProto
+instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Framework.Summary.Summary Proto.Tensorflow.Core.Framework.Summary.Summary
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary
+instance Data.ProtoLens.Field.HasField "sampleRate" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance Data.ProtoLens.Field.HasField "numChannels" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance Data.ProtoLens.Field.HasField "lengthFrames" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance Data.ProtoLens.Field.HasField "encodedAudioString" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance Data.ProtoLens.Field.HasField "contentType" Proto.Tensorflow.Core.Framework.Summary.Summary'Audio Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary'Audio
+instance Data.ProtoLens.Field.HasField "height" Proto.Tensorflow.Core.Framework.Summary.Summary'Image Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance Data.ProtoLens.Field.HasField "width" Proto.Tensorflow.Core.Framework.Summary.Summary'Image Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance Data.ProtoLens.Field.HasField "colorspace" Proto.Tensorflow.Core.Framework.Summary.Summary'Image Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance Data.ProtoLens.Field.HasField "encodedImageString" Proto.Tensorflow.Core.Framework.Summary.Summary'Image Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary'Image
+instance Data.ProtoLens.Field.HasField "nodeName" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "tag" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "simpleValue" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "maybe'simpleValue" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "obsoleteOldStyleHistogram" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "maybe'obsoleteOldStyleHistogram" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "image" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "maybe'image" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "histo" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "maybe'histo" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "audio" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "maybe'audio" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "tensor" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "maybe'tensor" Proto.Tensorflow.Core.Framework.Summary.Summary'Value Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.Summary'Value
+instance Data.ProtoLens.Field.HasField "typeHint" Proto.Tensorflow.Core.Framework.Summary.SummaryDescription Proto.Tensorflow.Core.Framework.Summary.SummaryDescription
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Framework.Summary.SummaryDescription
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Summary.SummaryDescription
+
+module Proto.Tensorflow.Core.Util.Event
+data Event
+Event :: !Double -> !Int64 -> !(Maybe Text) -> !(Maybe ByteString) -> !(Maybe Summary) -> !(Maybe LogMessage) -> !(Maybe SessionLog) -> !(Maybe TaggedRunMetadata) -> !(Maybe ByteString) -> Event
+[_Event'wallTime] :: Event -> !Double
+[_Event'step] :: Event -> !Int64
+[_Event'fileVersion] :: Event -> !(Maybe Text)
+[_Event'graphDef] :: Event -> !(Maybe ByteString)
+[_Event'summary] :: Event -> !(Maybe Summary)
+[_Event'logMessage] :: Event -> !(Maybe LogMessage)
+[_Event'sessionLog] :: Event -> !(Maybe SessionLog)
+[_Event'taggedRunMetadata] :: Event -> !(Maybe TaggedRunMetadata)
+[_Event'metaGraphDef] :: Event -> !(Maybe ByteString)
+data LogMessage
+LogMessage :: !LogMessage'Level -> !Text -> LogMessage
+[_LogMessage'level] :: LogMessage -> !LogMessage'Level
+[_LogMessage'message] :: LogMessage -> !Text
+data LogMessage'Level
+LogMessage'UNKNOWN :: LogMessage'Level
+LogMessage'DEBUG :: LogMessage'Level
+LogMessage'INFO :: LogMessage'Level
+LogMessage'WARN :: LogMessage'Level
+LogMessage'ERROR :: LogMessage'Level
+LogMessage'FATAL :: LogMessage'Level
+data SessionLog
+SessionLog :: !SessionLog'SessionStatus -> !Text -> !Text -> SessionLog
+[_SessionLog'status] :: SessionLog -> !SessionLog'SessionStatus
+[_SessionLog'checkpointPath] :: SessionLog -> !Text
+[_SessionLog'msg] :: SessionLog -> !Text
+data SessionLog'SessionStatus
+SessionLog'STATUS_UNSPECIFIED :: SessionLog'SessionStatus
+SessionLog'START :: SessionLog'SessionStatus
+SessionLog'STOP :: SessionLog'SessionStatus
+SessionLog'CHECKPOINT :: SessionLog'SessionStatus
+data TaggedRunMetadata
+TaggedRunMetadata :: !Text -> !ByteString -> TaggedRunMetadata
+[_TaggedRunMetadata'tag] :: TaggedRunMetadata -> !Text
+[_TaggedRunMetadata'runMetadata] :: TaggedRunMetadata -> !ByteString
+checkpointPath :: HasField "checkpointPath" msg msg' => Lens msg msg' (Field "checkpointPath" msg) (Field "checkpointPath" msg')
+fileVersion :: HasField "fileVersion" msg msg' => Lens msg msg' (Field "fileVersion" msg) (Field "fileVersion" msg')
+graphDef :: HasField "graphDef" msg msg' => Lens msg msg' (Field "graphDef" msg) (Field "graphDef" msg')
+level :: HasField "level" msg msg' => Lens msg msg' (Field "level" msg) (Field "level" msg')
+logMessage :: HasField "logMessage" msg msg' => Lens msg msg' (Field "logMessage" msg) (Field "logMessage" msg')
+maybe'fileVersion :: HasField "maybe'fileVersion" msg msg' => Lens msg msg' (Field "maybe'fileVersion" msg) (Field "maybe'fileVersion" msg')
+maybe'graphDef :: HasField "maybe'graphDef" msg msg' => Lens msg msg' (Field "maybe'graphDef" msg) (Field "maybe'graphDef" msg')
+maybe'logMessage :: HasField "maybe'logMessage" msg msg' => Lens msg msg' (Field "maybe'logMessage" msg) (Field "maybe'logMessage" msg')
+maybe'metaGraphDef :: HasField "maybe'metaGraphDef" msg msg' => Lens msg msg' (Field "maybe'metaGraphDef" msg) (Field "maybe'metaGraphDef" msg')
+maybe'sessionLog :: HasField "maybe'sessionLog" msg msg' => Lens msg msg' (Field "maybe'sessionLog" msg) (Field "maybe'sessionLog" msg')
+maybe'summary :: HasField "maybe'summary" msg msg' => Lens msg msg' (Field "maybe'summary" msg) (Field "maybe'summary" msg')
+maybe'taggedRunMetadata :: HasField "maybe'taggedRunMetadata" msg msg' => Lens msg msg' (Field "maybe'taggedRunMetadata" msg) (Field "maybe'taggedRunMetadata" msg')
+message :: HasField "message" msg msg' => Lens msg msg' (Field "message" msg) (Field "message" msg')
+metaGraphDef :: HasField "metaGraphDef" msg msg' => Lens msg msg' (Field "metaGraphDef" msg) (Field "metaGraphDef" msg')
+msg :: HasField "msg" msg msg' => Lens msg msg' (Field "msg" msg) (Field "msg" msg')
+runMetadata :: HasField "runMetadata" msg msg' => Lens msg msg' (Field "runMetadata" msg) (Field "runMetadata" msg')
+sessionLog :: HasField "sessionLog" msg msg' => Lens msg msg' (Field "sessionLog" msg) (Field "sessionLog" msg')
+status :: HasField "status" msg msg' => Lens msg msg' (Field "status" msg) (Field "status" msg')
+step :: HasField "step" msg msg' => Lens msg msg' (Field "step" msg) (Field "step" msg')
+summary :: HasField "summary" msg msg' => Lens msg msg' (Field "summary" msg) (Field "summary" msg')
+tag :: HasField "tag" msg msg' => Lens msg msg' (Field "tag" msg) (Field "tag" msg')
+taggedRunMetadata :: HasField "taggedRunMetadata" msg msg' => Lens msg msg' (Field "taggedRunMetadata" msg) (Field "taggedRunMetadata" msg')
+wallTime :: HasField "wallTime" msg msg' => Lens msg msg' (Field "wallTime" msg) (Field "wallTime" msg')
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.Event
+instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.Event
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata
+instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.SessionLog
+instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.SessionLog
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus
+instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.LogMessage
+instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.LogMessage
+instance GHC.Classes.Eq Proto.Tensorflow.Core.Util.Event.LogMessage'Level
+instance GHC.Show.Show Proto.Tensorflow.Core.Util.Event.LogMessage'Level
+instance Data.ProtoLens.Field.HasField "wallTime" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "step" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "fileVersion" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "maybe'fileVersion" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "graphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "maybe'graphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "summary" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "maybe'summary" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "logMessage" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "maybe'logMessage" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "sessionLog" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "maybe'sessionLog" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "taggedRunMetadata" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "maybe'taggedRunMetadata" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "metaGraphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "maybe'metaGraphDef" Proto.Tensorflow.Core.Util.Event.Event Proto.Tensorflow.Core.Util.Event.Event
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.Event
+instance Data.ProtoLens.Field.HasField "level" Proto.Tensorflow.Core.Util.Event.LogMessage Proto.Tensorflow.Core.Util.Event.LogMessage
+instance Data.ProtoLens.Field.HasField "message" Proto.Tensorflow.Core.Util.Event.LogMessage Proto.Tensorflow.Core.Util.Event.LogMessage
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.LogMessage
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.LogMessage
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.LogMessage'Level
+instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Util.Event.LogMessage'Level
+instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Util.Event.LogMessage'Level
+instance GHC.Enum.Enum Proto.Tensorflow.Core.Util.Event.LogMessage'Level
+instance GHC.Enum.Bounded Proto.Tensorflow.Core.Util.Event.LogMessage'Level
+instance Data.ProtoLens.Field.HasField "status" Proto.Tensorflow.Core.Util.Event.SessionLog Proto.Tensorflow.Core.Util.Event.SessionLog
+instance Data.ProtoLens.Field.HasField "checkpointPath" Proto.Tensorflow.Core.Util.Event.SessionLog Proto.Tensorflow.Core.Util.Event.SessionLog
+instance Data.ProtoLens.Field.HasField "msg" Proto.Tensorflow.Core.Util.Event.SessionLog Proto.Tensorflow.Core.Util.Event.SessionLog
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.SessionLog
+instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.SessionLog
+instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus
+instance Data.ProtoLens.Message.FieldDefault
Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus +instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus +instance GHC.Enum.Enum Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus +instance GHC.Enum.Bounded Proto.Tensorflow.Core.Util.Event.SessionLog'SessionStatus +instance Data.ProtoLens.Field.HasField "tag" Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata +instance Data.ProtoLens.Field.HasField "runMetadata" Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata +instance Data.Default.Class.Default Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Util.Event.TaggedRunMetadata + module Proto.Tensorflow.Core.Framework.AttrValue data AttrValue -AttrValue :: Maybe ByteString -> Maybe Int64 -> Maybe Float -> Maybe Bool -> Maybe DataType -> Maybe TensorShapeProto -> Maybe TensorProto -> Maybe AttrValue'ListValue -> Maybe NameAttrList -> Maybe Text -> AttrValue -[_AttrValue's] :: AttrValue -> Maybe ByteString -[_AttrValue'i] :: AttrValue -> Maybe Int64 -[_AttrValue'f] :: AttrValue -> Maybe Float -[_AttrValue'b] :: AttrValue -> Maybe Bool -[_AttrValue'type'] :: AttrValue -> Maybe DataType -[_AttrValue'shape] :: AttrValue -> Maybe TensorShapeProto -[_AttrValue'tensor] :: AttrValue -> Maybe TensorProto -[_AttrValue'list] :: AttrValue -> Maybe AttrValue'ListValue -[_AttrValue'func] :: AttrValue -> Maybe NameAttrList -[_AttrValue'placeholder] :: AttrValue -> Maybe Text +AttrValue :: !(Maybe ByteString) -> !(Maybe Int64) -> !(Maybe Float) -> !(Maybe Bool) -> !(Maybe DataType) -> !(Maybe TensorShapeProto) -> !(Maybe TensorProto) -> !(Maybe AttrValue'ListValue) -> !(Maybe NameAttrList) -> !(Maybe Text) -> AttrValue +[_AttrValue's] :: AttrValue -> !(Maybe ByteString) +[_AttrValue'i] :: AttrValue -> !(Maybe Int64) +[_AttrValue'f] :: AttrValue -> !(Maybe Float) +[_AttrValue'b] :: AttrValue -> !(Maybe Bool) +[_AttrValue'type'] :: AttrValue -> !(Maybe DataType) +[_AttrValue'shape] :: AttrValue -> !(Maybe TensorShapeProto) +[_AttrValue'tensor] :: AttrValue -> !(Maybe TensorProto) +[_AttrValue'list] :: AttrValue -> !(Maybe AttrValue'ListValue) +[_AttrValue'func] :: AttrValue -> !(Maybe NameAttrList) +[_AttrValue'placeholder] :: AttrValue -> !(Maybe Text) data AttrValue'ListValue -AttrValue'ListValue :: [ByteString] -> [Int64] -> [Float] -> [Bool] -> [DataType] -> [TensorShapeProto] -> [TensorProto] -> AttrValue'ListValue -[_AttrValue'ListValue's] :: AttrValue'ListValue -> [ByteString] -[_AttrValue'ListValue'i] :: AttrValue'ListValue -> [Int64] -[_AttrValue'ListValue'f] :: AttrValue'ListValue -> [Float] -[_AttrValue'ListValue'b] :: AttrValue'ListValue -> [Bool] -[_AttrValue'ListValue'type'] :: AttrValue'ListValue -> [DataType] -[_AttrValue'ListValue'shape] :: AttrValue'ListValue -> [TensorShapeProto] -[_AttrValue'ListValue'tensor] :: AttrValue'ListValue -> [TensorProto] +AttrValue'ListValue :: ![ByteString] -> ![Int64] -> ![Float] -> ![Bool] -> ![DataType] -> ![TensorShapeProto] -> ![TensorProto] -> ![NameAttrList] -> AttrValue'ListValue +[_AttrValue'ListValue's] :: AttrValue'ListValue -> ![ByteString] +[_AttrValue'ListValue'i] :: AttrValue'ListValue -> ![Int64] +[_AttrValue'ListValue'f] :: AttrValue'ListValue -> ![Float] +[_AttrValue'ListValue'b] :: AttrValue'ListValue -> ![Bool] +[_AttrValue'ListValue'type'] :: AttrValue'ListValue -> ![DataType] +[_AttrValue'ListValue'shape] :: 
AttrValue'ListValue -> ![TensorShapeProto] +[_AttrValue'ListValue'tensor] :: AttrValue'ListValue -> ![TensorProto] +[_AttrValue'ListValue'func] :: AttrValue'ListValue -> ![NameAttrList] data NameAttrList -NameAttrList :: Text -> Map Text AttrValue -> NameAttrList -[_NameAttrList'name] :: NameAttrList -> Text -[_NameAttrList'attr] :: NameAttrList -> Map Text AttrValue +NameAttrList :: !Text -> !(Map Text AttrValue) -> NameAttrList +[_NameAttrList'name] :: NameAttrList -> !Text +[_NameAttrList'attr] :: NameAttrList -> !(Map Text AttrValue) data NameAttrList'AttrEntry -NameAttrList'AttrEntry :: Text -> Maybe AttrValue -> NameAttrList'AttrEntry -[_NameAttrList'AttrEntry'key] :: NameAttrList'AttrEntry -> Text -[_NameAttrList'AttrEntry'value] :: NameAttrList'AttrEntry -> Maybe AttrValue +NameAttrList'AttrEntry :: !Text -> !(Maybe AttrValue) -> NameAttrList'AttrEntry +[_NameAttrList'AttrEntry'key] :: NameAttrList'AttrEntry -> !Text +[_NameAttrList'AttrEntry'value] :: NameAttrList'AttrEntry -> !(Maybe AttrValue) attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') b :: HasField "b" msg msg' => Lens msg msg' (Field "b" msg) (Field "b" msg') f :: HasField "f" msg msg' => Lens msg msg' (Field "f" msg) (Field "f" msg') @@ -217,12 +460,12 @@ type' :: HasField "type'" msg msg' => Lens msg msg' (Field "type'" msg) (Field " value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList'AttrEntry +instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.AttrValue instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.AttrValue instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList -instance GHC.Classes.Eq Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue -instance GHC.Show.Show Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue instance Data.ProtoLens.Field.HasField "s" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue instance Data.ProtoLens.Field.HasField "maybe's" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue instance Data.ProtoLens.Field.HasField "i" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue @@ -252,6 +495,7 @@ instance Data.ProtoLens.Field.HasField "b" Proto.Tensorflow.Core.Framework.AttrV instance Data.ProtoLens.Field.HasField "type'" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue instance Data.ProtoLens.Field.HasField "shape" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue instance Data.ProtoLens.Field.HasField "tensor" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue +instance Data.ProtoLens.Field.HasField "func" Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue instance 
Data.Default.Class.Default Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValue.AttrValue'ListValue instance Data.ProtoLens.Field.HasField "name" Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList Proto.Tensorflow.Core.Framework.AttrValue.NameAttrList @@ -266,16 +510,16 @@ instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.AttrValu module Proto.Tensorflow.Core.Framework.NodeDef data NodeDef -NodeDef :: Text -> Text -> [Text] -> Text -> Map Text AttrValue -> NodeDef -[_NodeDef'name] :: NodeDef -> Text -[_NodeDef'op] :: NodeDef -> Text -[_NodeDef'input] :: NodeDef -> [Text] -[_NodeDef'device] :: NodeDef -> Text -[_NodeDef'attr] :: NodeDef -> Map Text AttrValue +NodeDef :: !Text -> !Text -> ![Text] -> !Text -> !(Map Text AttrValue) -> NodeDef +[_NodeDef'name] :: NodeDef -> !Text +[_NodeDef'op] :: NodeDef -> !Text +[_NodeDef'input] :: NodeDef -> ![Text] +[_NodeDef'device] :: NodeDef -> !Text +[_NodeDef'attr] :: NodeDef -> !(Map Text AttrValue) data NodeDef'AttrEntry -NodeDef'AttrEntry :: Text -> Maybe AttrValue -> NodeDef'AttrEntry -[_NodeDef'AttrEntry'key] :: NodeDef'AttrEntry -> Text -[_NodeDef'AttrEntry'value] :: NodeDef'AttrEntry -> Maybe AttrValue +NodeDef'AttrEntry :: !Text -> !(Maybe AttrValue) -> NodeDef'AttrEntry +[_NodeDef'AttrEntry'key] :: NodeDef'AttrEntry -> !Text +[_NodeDef'AttrEntry'value] :: NodeDef'AttrEntry -> !(Maybe AttrValue) attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') device :: HasField "device" msg msg' => Lens msg msg' (Field "device" msg) (Field "device" msg') input :: HasField "input" msg msg' => Lens msg msg' (Field "input" msg) (Field "input" msg') @@ -303,43 +547,43 @@ instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.NodeDef. 
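A quick orientation for readers skimming these generated listings: every message gets a Data.ProtoLens/Data.Default.Class.Default instance plus per-field HasField lenses, so values are built by updating def. A minimal sketch against the NodeDef module above, using the lens-family operators the handwritten sources in this repository already import (treat it as illustrative, not tested code):

{-# LANGUAGE OverloadedStrings #-}

import Data.Default.Class (def)
import Lens.Family2 ((&), (.~))
import Proto.Tensorflow.Core.Framework.NodeDef (NodeDef, device, input, name, op)

-- An Identity node reading from "x", pinned to the CPU. `input` sets the
-- repeated field from a list; the other lenses set singular Text fields.
identityNode :: NodeDef
identityNode = def
    & name .~ "y"
    & op .~ "Identity"
    & input .~ ["x"]
    & device .~ "/cpu:0"

The same def-and-lenses pattern applies to every module in this file, including the OpDef listing below.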
module Proto.Tensorflow.Core.Framework.OpDef data OpDef -OpDef :: Text -> [OpDef'ArgDef] -> [OpDef'ArgDef] -> [OpDef'AttrDef] -> Maybe OpDeprecation -> Text -> Text -> Bool -> Bool -> Bool -> Bool -> OpDef -[_OpDef'name] :: OpDef -> Text -[_OpDef'inputArg] :: OpDef -> [OpDef'ArgDef] -[_OpDef'outputArg] :: OpDef -> [OpDef'ArgDef] -[_OpDef'attr] :: OpDef -> [OpDef'AttrDef] -[_OpDef'deprecation] :: OpDef -> Maybe OpDeprecation -[_OpDef'summary] :: OpDef -> Text -[_OpDef'description] :: OpDef -> Text -[_OpDef'isCommutative] :: OpDef -> Bool -[_OpDef'isAggregate] :: OpDef -> Bool -[_OpDef'isStateful] :: OpDef -> Bool -[_OpDef'allowsUninitializedInput] :: OpDef -> Bool +OpDef :: !Text -> ![OpDef'ArgDef] -> ![OpDef'ArgDef] -> ![OpDef'AttrDef] -> !(Maybe OpDeprecation) -> !Text -> !Text -> !Bool -> !Bool -> !Bool -> !Bool -> OpDef +[_OpDef'name] :: OpDef -> !Text +[_OpDef'inputArg] :: OpDef -> ![OpDef'ArgDef] +[_OpDef'outputArg] :: OpDef -> ![OpDef'ArgDef] +[_OpDef'attr] :: OpDef -> ![OpDef'AttrDef] +[_OpDef'deprecation] :: OpDef -> !(Maybe OpDeprecation) +[_OpDef'summary] :: OpDef -> !Text +[_OpDef'description] :: OpDef -> !Text +[_OpDef'isCommutative] :: OpDef -> !Bool +[_OpDef'isAggregate] :: OpDef -> !Bool +[_OpDef'isStateful] :: OpDef -> !Bool +[_OpDef'allowsUninitializedInput] :: OpDef -> !Bool data OpDef'ArgDef -OpDef'ArgDef :: Text -> Text -> DataType -> Text -> Text -> Text -> Bool -> OpDef'ArgDef -[_OpDef'ArgDef'name] :: OpDef'ArgDef -> Text -[_OpDef'ArgDef'description] :: OpDef'ArgDef -> Text -[_OpDef'ArgDef'type'] :: OpDef'ArgDef -> DataType -[_OpDef'ArgDef'typeAttr] :: OpDef'ArgDef -> Text -[_OpDef'ArgDef'numberAttr] :: OpDef'ArgDef -> Text -[_OpDef'ArgDef'typeListAttr] :: OpDef'ArgDef -> Text -[_OpDef'ArgDef'isRef] :: OpDef'ArgDef -> Bool +OpDef'ArgDef :: !Text -> !Text -> !DataType -> !Text -> !Text -> !Text -> !Bool -> OpDef'ArgDef +[_OpDef'ArgDef'name] :: OpDef'ArgDef -> !Text +[_OpDef'ArgDef'description] :: OpDef'ArgDef -> !Text +[_OpDef'ArgDef'type'] :: OpDef'ArgDef -> !DataType +[_OpDef'ArgDef'typeAttr] :: OpDef'ArgDef -> !Text +[_OpDef'ArgDef'numberAttr] :: OpDef'ArgDef -> !Text +[_OpDef'ArgDef'typeListAttr] :: OpDef'ArgDef -> !Text +[_OpDef'ArgDef'isRef] :: OpDef'ArgDef -> !Bool data OpDef'AttrDef -OpDef'AttrDef :: Text -> Text -> Maybe AttrValue -> Text -> Bool -> Int64 -> Maybe AttrValue -> OpDef'AttrDef -[_OpDef'AttrDef'name] :: OpDef'AttrDef -> Text -[_OpDef'AttrDef'type'] :: OpDef'AttrDef -> Text -[_OpDef'AttrDef'defaultValue] :: OpDef'AttrDef -> Maybe AttrValue -[_OpDef'AttrDef'description] :: OpDef'AttrDef -> Text -[_OpDef'AttrDef'hasMinimum] :: OpDef'AttrDef -> Bool -[_OpDef'AttrDef'minimum] :: OpDef'AttrDef -> Int64 -[_OpDef'AttrDef'allowedValues] :: OpDef'AttrDef -> Maybe AttrValue +OpDef'AttrDef :: !Text -> !Text -> !(Maybe AttrValue) -> !Text -> !Bool -> !Int64 -> !(Maybe AttrValue) -> OpDef'AttrDef +[_OpDef'AttrDef'name] :: OpDef'AttrDef -> !Text +[_OpDef'AttrDef'type'] :: OpDef'AttrDef -> !Text +[_OpDef'AttrDef'defaultValue] :: OpDef'AttrDef -> !(Maybe AttrValue) +[_OpDef'AttrDef'description] :: OpDef'AttrDef -> !Text +[_OpDef'AttrDef'hasMinimum] :: OpDef'AttrDef -> !Bool +[_OpDef'AttrDef'minimum] :: OpDef'AttrDef -> !Int64 +[_OpDef'AttrDef'allowedValues] :: OpDef'AttrDef -> !(Maybe AttrValue) data OpDeprecation -OpDeprecation :: Int32 -> Text -> OpDeprecation -[_OpDeprecation'version] :: OpDeprecation -> Int32 -[_OpDeprecation'explanation] :: OpDeprecation -> Text +OpDeprecation :: !Int32 -> !Text -> OpDeprecation +[_OpDeprecation'version] :: OpDeprecation -> 
!Int32 +[_OpDeprecation'explanation] :: OpDeprecation -> !Text data OpList -OpList :: [OpDef] -> OpList -[_OpList'op] :: OpList -> [OpDef] +OpList :: ![OpDef] -> OpList +[_OpList'op] :: OpList -> ![OpDef] allowedValues :: HasField "allowedValues" msg msg' => Lens msg msg' (Field "allowedValues" msg) (Field "allowedValues" msg') allowsUninitializedInput :: HasField "allowsUninitializedInput" msg msg' => Lens msg msg' (Field "allowsUninitializedInput" msg) (Field "allowsUninitializedInput" msg') attr :: HasField "attr" msg msg' => Lens msg msg' (Field "attr" msg) (Field "attr" msg') @@ -420,11 +664,11 @@ instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.OpDef.Op module Proto.Tensorflow.Core.Framework.Graph data GraphDef -GraphDef :: [NodeDef] -> Maybe VersionDef -> Int32 -> Maybe FunctionDefLibrary -> GraphDef -[_GraphDef'node] :: GraphDef -> [NodeDef] -[_GraphDef'versions] :: GraphDef -> Maybe VersionDef -[_GraphDef'version] :: GraphDef -> Int32 -[_GraphDef'library] :: GraphDef -> Maybe FunctionDefLibrary +GraphDef :: ![NodeDef] -> !(Maybe VersionDef) -> !Int32 -> !(Maybe FunctionDefLibrary) -> GraphDef +[_GraphDef'node] :: GraphDef -> ![NodeDef] +[_GraphDef'versions] :: GraphDef -> !(Maybe VersionDef) +[_GraphDef'version] :: GraphDef -> !Int32 +[_GraphDef'library] :: GraphDef -> !(Maybe FunctionDefLibrary) library :: HasField "library" msg msg' => Lens msg msg' (Field "library" msg) (Field "library" msg') maybe'library :: HasField "maybe'library" msg msg' => Lens msg msg' (Field "maybe'library" msg) (Field "maybe'library" msg') maybe'versions :: HasField "maybe'versions" msg msg' => Lens msg msg' (Field "maybe'versions" msg) (Field "maybe'versions" msg') @@ -444,84 +688,86 @@ instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Framework.Graph.Gr module Proto.Tensorflow.Core.Protobuf.Config data ConfigProto -ConfigProto :: Map Text Int32 -> Int32 -> Int32 -> Bool -> [ThreadPoolOptionProto] -> Int32 -> [Text] -> Maybe GPUOptions -> Bool -> Bool -> Maybe GraphOptions -> Int64 -> ConfigProto -[_ConfigProto'deviceCount] :: ConfigProto -> Map Text Int32 -[_ConfigProto'intraOpParallelismThreads] :: ConfigProto -> Int32 -[_ConfigProto'interOpParallelismThreads] :: ConfigProto -> Int32 -[_ConfigProto'usePerSessionThreads] :: ConfigProto -> Bool -[_ConfigProto'sessionInterOpThreadPool] :: ConfigProto -> [ThreadPoolOptionProto] -[_ConfigProto'placementPeriod] :: ConfigProto -> Int32 -[_ConfigProto'deviceFilters] :: ConfigProto -> [Text] -[_ConfigProto'gpuOptions] :: ConfigProto -> Maybe GPUOptions -[_ConfigProto'allowSoftPlacement] :: ConfigProto -> Bool -[_ConfigProto'logDevicePlacement] :: ConfigProto -> Bool -[_ConfigProto'graphOptions] :: ConfigProto -> Maybe GraphOptions -[_ConfigProto'operationTimeoutInMs] :: ConfigProto -> Int64 +ConfigProto :: !(Map Text Int32) -> !Int32 -> !Int32 -> !Bool -> ![ThreadPoolOptionProto] -> !Int32 -> ![Text] -> !(Maybe GPUOptions) -> !Bool -> !Bool -> !(Maybe GraphOptions) -> !Int64 -> !(Maybe RPCOptions) -> ConfigProto +[_ConfigProto'deviceCount] :: ConfigProto -> !(Map Text Int32) +[_ConfigProto'intraOpParallelismThreads] :: ConfigProto -> !Int32 +[_ConfigProto'interOpParallelismThreads] :: ConfigProto -> !Int32 +[_ConfigProto'usePerSessionThreads] :: ConfigProto -> !Bool +[_ConfigProto'sessionInterOpThreadPool] :: ConfigProto -> ![ThreadPoolOptionProto] +[_ConfigProto'placementPeriod] :: ConfigProto -> !Int32 +[_ConfigProto'deviceFilters] :: ConfigProto -> ![Text] +[_ConfigProto'gpuOptions] :: ConfigProto -> !(Maybe GPUOptions) 
+[_ConfigProto'allowSoftPlacement] :: ConfigProto -> !Bool +[_ConfigProto'logDevicePlacement] :: ConfigProto -> !Bool +[_ConfigProto'graphOptions] :: ConfigProto -> !(Maybe GraphOptions) +[_ConfigProto'operationTimeoutInMs] :: ConfigProto -> !Int64 +[_ConfigProto'rpcOptions] :: ConfigProto -> !(Maybe RPCOptions) data ConfigProto'DeviceCountEntry -ConfigProto'DeviceCountEntry :: Text -> Int32 -> ConfigProto'DeviceCountEntry -[_ConfigProto'DeviceCountEntry'key] :: ConfigProto'DeviceCountEntry -> Text -[_ConfigProto'DeviceCountEntry'value] :: ConfigProto'DeviceCountEntry -> Int32 -data DebugTensorWatch -DebugTensorWatch :: Text -> Int32 -> [Text] -> [Text] -> DebugTensorWatch -[_DebugTensorWatch'nodeName] :: DebugTensorWatch -> Text -[_DebugTensorWatch'outputSlot] :: DebugTensorWatch -> Int32 -[_DebugTensorWatch'debugOps] :: DebugTensorWatch -> [Text] -[_DebugTensorWatch'debugUrls] :: DebugTensorWatch -> [Text] +ConfigProto'DeviceCountEntry :: !Text -> !Int32 -> ConfigProto'DeviceCountEntry +[_ConfigProto'DeviceCountEntry'key] :: ConfigProto'DeviceCountEntry -> !Text +[_ConfigProto'DeviceCountEntry'value] :: ConfigProto'DeviceCountEntry -> !Int32 data GPUOptions -GPUOptions :: Double -> Text -> Int64 -> Bool -> Text -> GPUOptions -[_GPUOptions'perProcessGpuMemoryFraction] :: GPUOptions -> Double -[_GPUOptions'allocatorType] :: GPUOptions -> Text -[_GPUOptions'deferredDeletionBytes] :: GPUOptions -> Int64 -[_GPUOptions'allowGrowth] :: GPUOptions -> Bool -[_GPUOptions'visibleDeviceList] :: GPUOptions -> Text +GPUOptions :: !Double -> !Text -> !Int64 -> !Bool -> !Text -> GPUOptions +[_GPUOptions'perProcessGpuMemoryFraction] :: GPUOptions -> !Double +[_GPUOptions'allocatorType] :: GPUOptions -> !Text +[_GPUOptions'deferredDeletionBytes] :: GPUOptions -> !Int64 +[_GPUOptions'allowGrowth] :: GPUOptions -> !Bool +[_GPUOptions'visibleDeviceList] :: GPUOptions -> !Text data GraphOptions -GraphOptions :: Bool -> Maybe OptimizerOptions -> Int64 -> Int64 -> Bool -> Bool -> Bool -> Int32 -> GraphOptions -[_GraphOptions'enableRecvScheduling] :: GraphOptions -> Bool -[_GraphOptions'optimizerOptions] :: GraphOptions -> Maybe OptimizerOptions -[_GraphOptions'buildCostModel] :: GraphOptions -> Int64 -[_GraphOptions'buildCostModelAfter] :: GraphOptions -> Int64 -[_GraphOptions'inferShapes] :: GraphOptions -> Bool -[_GraphOptions'placePrunedGraph] :: GraphOptions -> Bool -[_GraphOptions'enableBfloat16Sendrecv] :: GraphOptions -> Bool -[_GraphOptions'timelineStep] :: GraphOptions -> Int32 +GraphOptions :: !Bool -> !(Maybe OptimizerOptions) -> !Int64 -> !Int64 -> !Bool -> !Bool -> !Bool -> !Int32 -> GraphOptions +[_GraphOptions'enableRecvScheduling] :: GraphOptions -> !Bool +[_GraphOptions'optimizerOptions] :: GraphOptions -> !(Maybe OptimizerOptions) +[_GraphOptions'buildCostModel] :: GraphOptions -> !Int64 +[_GraphOptions'buildCostModelAfter] :: GraphOptions -> !Int64 +[_GraphOptions'inferShapes] :: GraphOptions -> !Bool +[_GraphOptions'placePrunedGraph] :: GraphOptions -> !Bool +[_GraphOptions'enableBfloat16Sendrecv] :: GraphOptions -> !Bool +[_GraphOptions'timelineStep] :: GraphOptions -> !Int32 data OptimizerOptions -OptimizerOptions :: Bool -> Bool -> Bool -> OptimizerOptions'Level -> OptimizerOptions -[_OptimizerOptions'doCommonSubexpressionElimination] :: OptimizerOptions -> Bool -[_OptimizerOptions'doConstantFolding] :: OptimizerOptions -> Bool -[_OptimizerOptions'doFunctionInlining] :: OptimizerOptions -> Bool -[_OptimizerOptions'optLevel] :: OptimizerOptions -> OptimizerOptions'Level +OptimizerOptions 
:: !Bool -> !Bool -> !Bool -> !OptimizerOptions'Level -> !OptimizerOptions'GlobalJitLevel -> OptimizerOptions +[_OptimizerOptions'doCommonSubexpressionElimination] :: OptimizerOptions -> !Bool +[_OptimizerOptions'doConstantFolding] :: OptimizerOptions -> !Bool +[_OptimizerOptions'doFunctionInlining] :: OptimizerOptions -> !Bool +[_OptimizerOptions'optLevel] :: OptimizerOptions -> !OptimizerOptions'Level +[_OptimizerOptions'globalJitLevel] :: OptimizerOptions -> !OptimizerOptions'GlobalJitLevel +data OptimizerOptions'GlobalJitLevel +OptimizerOptions'OFF :: OptimizerOptions'GlobalJitLevel +OptimizerOptions'DEFAULT :: OptimizerOptions'GlobalJitLevel +OptimizerOptions'ON_1 :: OptimizerOptions'GlobalJitLevel +OptimizerOptions'ON_2 :: OptimizerOptions'GlobalJitLevel data OptimizerOptions'Level OptimizerOptions'L0 :: OptimizerOptions'Level OptimizerOptions'L1 :: OptimizerOptions'Level +data RPCOptions +RPCOptions :: !Bool -> RPCOptions +[_RPCOptions'useRpcForInprocessMaster] :: RPCOptions -> !Bool data RunMetadata -RunMetadata :: Maybe StepStats -> Maybe CostGraphDef -> [GraphDef] -> RunMetadata -[_RunMetadata'stepStats] :: RunMetadata -> Maybe StepStats -[_RunMetadata'costGraph] :: RunMetadata -> Maybe CostGraphDef -[_RunMetadata'partitionGraphs] :: RunMetadata -> [GraphDef] +RunMetadata :: !(Maybe StepStats) -> !(Maybe CostGraphDef) -> ![GraphDef] -> RunMetadata +[_RunMetadata'stepStats] :: RunMetadata -> !(Maybe StepStats) +[_RunMetadata'costGraph] :: RunMetadata -> !(Maybe CostGraphDef) +[_RunMetadata'partitionGraphs] :: RunMetadata -> ![GraphDef] data RunOptions -RunOptions :: RunOptions'TraceLevel -> Int64 -> Int32 -> [DebugTensorWatch] -> Bool -> RunOptions -[_RunOptions'traceLevel] :: RunOptions -> RunOptions'TraceLevel -[_RunOptions'timeoutInMs] :: RunOptions -> Int64 -[_RunOptions'interOpThreadPool] :: RunOptions -> Int32 -[_RunOptions'debugTensorWatchOpts] :: RunOptions -> [DebugTensorWatch] -[_RunOptions'outputPartitionGraphs] :: RunOptions -> Bool +RunOptions :: !RunOptions'TraceLevel -> !Int64 -> !Int32 -> !Bool -> !(Maybe DebugOptions) -> RunOptions +[_RunOptions'traceLevel] :: RunOptions -> !RunOptions'TraceLevel +[_RunOptions'timeoutInMs] :: RunOptions -> !Int64 +[_RunOptions'interOpThreadPool] :: RunOptions -> !Int32 +[_RunOptions'outputPartitionGraphs] :: RunOptions -> !Bool +[_RunOptions'debugOptions] :: RunOptions -> !(Maybe DebugOptions) data RunOptions'TraceLevel RunOptions'NO_TRACE :: RunOptions'TraceLevel RunOptions'SOFTWARE_TRACE :: RunOptions'TraceLevel RunOptions'HARDWARE_TRACE :: RunOptions'TraceLevel RunOptions'FULL_TRACE :: RunOptions'TraceLevel data ThreadPoolOptionProto -ThreadPoolOptionProto :: Int32 -> ThreadPoolOptionProto -[_ThreadPoolOptionProto'numThreads] :: ThreadPoolOptionProto -> Int32 +ThreadPoolOptionProto :: !Int32 -> ThreadPoolOptionProto +[_ThreadPoolOptionProto'numThreads] :: ThreadPoolOptionProto -> !Int32 allocatorType :: HasField "allocatorType" msg msg' => Lens msg msg' (Field "allocatorType" msg) (Field "allocatorType" msg') allowGrowth :: HasField "allowGrowth" msg msg' => Lens msg msg' (Field "allowGrowth" msg) (Field "allowGrowth" msg') allowSoftPlacement :: HasField "allowSoftPlacement" msg msg' => Lens msg msg' (Field "allowSoftPlacement" msg) (Field "allowSoftPlacement" msg') buildCostModel :: HasField "buildCostModel" msg msg' => Lens msg msg' (Field "buildCostModel" msg) (Field "buildCostModel" msg') buildCostModelAfter :: HasField "buildCostModelAfter" msg msg' => Lens msg msg' (Field "buildCostModelAfter" msg) (Field 
"buildCostModelAfter" msg') costGraph :: HasField "costGraph" msg msg' => Lens msg msg' (Field "costGraph" msg) (Field "costGraph" msg') -debugOps :: HasField "debugOps" msg msg' => Lens msg msg' (Field "debugOps" msg) (Field "debugOps" msg') -debugTensorWatchOpts :: HasField "debugTensorWatchOpts" msg msg' => Lens msg msg' (Field "debugTensorWatchOpts" msg) (Field "debugTensorWatchOpts" msg') -debugUrls :: HasField "debugUrls" msg msg' => Lens msg msg' (Field "debugUrls" msg) (Field "debugUrls" msg') +debugOptions :: HasField "debugOptions" msg msg' => Lens msg msg' (Field "debugOptions" msg) (Field "debugOptions" msg') deferredDeletionBytes :: HasField "deferredDeletionBytes" msg msg' => Lens msg msg' (Field "deferredDeletionBytes" msg) (Field "deferredDeletionBytes" msg') deviceCount :: HasField "deviceCount" msg msg' => Lens msg msg' (Field "deviceCount" msg) (Field "deviceCount" msg') deviceFilters :: HasField "deviceFilters" msg msg' => Lens msg msg' (Field "deviceFilters" msg) (Field "deviceFilters" msg') @@ -530,6 +776,7 @@ doConstantFolding :: HasField "doConstantFolding" msg msg' => Lens msg msg' (Fie doFunctionInlining :: HasField "doFunctionInlining" msg msg' => Lens msg msg' (Field "doFunctionInlining" msg) (Field "doFunctionInlining" msg') enableBfloat16Sendrecv :: HasField "enableBfloat16Sendrecv" msg msg' => Lens msg msg' (Field "enableBfloat16Sendrecv" msg) (Field "enableBfloat16Sendrecv" msg') enableRecvScheduling :: HasField "enableRecvScheduling" msg msg' => Lens msg msg' (Field "enableRecvScheduling" msg) (Field "enableRecvScheduling" msg') +globalJitLevel :: HasField "globalJitLevel" msg msg' => Lens msg msg' (Field "globalJitLevel" msg) (Field "globalJitLevel" msg') gpuOptions :: HasField "gpuOptions" msg msg' => Lens msg msg' (Field "gpuOptions" msg) (Field "gpuOptions" msg') graphOptions :: HasField "graphOptions" msg msg' => Lens msg msg' (Field "graphOptions" msg) (Field "graphOptions" msg') inferShapes :: HasField "inferShapes" msg msg' => Lens msg msg' (Field "inferShapes" msg) (Field "inferShapes" msg') @@ -539,27 +786,29 @@ intraOpParallelismThreads :: HasField "intraOpParallelismThreads" msg msg' => Le key :: HasField "key" msg msg' => Lens msg msg' (Field "key" msg) (Field "key" msg') logDevicePlacement :: HasField "logDevicePlacement" msg msg' => Lens msg msg' (Field "logDevicePlacement" msg) (Field "logDevicePlacement" msg') maybe'costGraph :: HasField "maybe'costGraph" msg msg' => Lens msg msg' (Field "maybe'costGraph" msg) (Field "maybe'costGraph" msg') +maybe'debugOptions :: HasField "maybe'debugOptions" msg msg' => Lens msg msg' (Field "maybe'debugOptions" msg) (Field "maybe'debugOptions" msg') maybe'gpuOptions :: HasField "maybe'gpuOptions" msg msg' => Lens msg msg' (Field "maybe'gpuOptions" msg) (Field "maybe'gpuOptions" msg') maybe'graphOptions :: HasField "maybe'graphOptions" msg msg' => Lens msg msg' (Field "maybe'graphOptions" msg) (Field "maybe'graphOptions" msg') maybe'optimizerOptions :: HasField "maybe'optimizerOptions" msg msg' => Lens msg msg' (Field "maybe'optimizerOptions" msg) (Field "maybe'optimizerOptions" msg') +maybe'rpcOptions :: HasField "maybe'rpcOptions" msg msg' => Lens msg msg' (Field "maybe'rpcOptions" msg) (Field "maybe'rpcOptions" msg') maybe'stepStats :: HasField "maybe'stepStats" msg msg' => Lens msg msg' (Field "maybe'stepStats" msg) (Field "maybe'stepStats" msg') -nodeName :: HasField "nodeName" msg msg' => Lens msg msg' (Field "nodeName" msg) (Field "nodeName" msg') numThreads :: HasField "numThreads" msg msg' => Lens 
msg msg' (Field "numThreads" msg) (Field "numThreads" msg') operationTimeoutInMs :: HasField "operationTimeoutInMs" msg msg' => Lens msg msg' (Field "operationTimeoutInMs" msg) (Field "operationTimeoutInMs" msg') optLevel :: HasField "optLevel" msg msg' => Lens msg msg' (Field "optLevel" msg) (Field "optLevel" msg') optimizerOptions :: HasField "optimizerOptions" msg msg' => Lens msg msg' (Field "optimizerOptions" msg) (Field "optimizerOptions" msg') outputPartitionGraphs :: HasField "outputPartitionGraphs" msg msg' => Lens msg msg' (Field "outputPartitionGraphs" msg) (Field "outputPartitionGraphs" msg') -outputSlot :: HasField "outputSlot" msg msg' => Lens msg msg' (Field "outputSlot" msg) (Field "outputSlot" msg') partitionGraphs :: HasField "partitionGraphs" msg msg' => Lens msg msg' (Field "partitionGraphs" msg) (Field "partitionGraphs" msg') perProcessGpuMemoryFraction :: HasField "perProcessGpuMemoryFraction" msg msg' => Lens msg msg' (Field "perProcessGpuMemoryFraction" msg) (Field "perProcessGpuMemoryFraction" msg') placePrunedGraph :: HasField "placePrunedGraph" msg msg' => Lens msg msg' (Field "placePrunedGraph" msg) (Field "placePrunedGraph" msg') placementPeriod :: HasField "placementPeriod" msg msg' => Lens msg msg' (Field "placementPeriod" msg) (Field "placementPeriod" msg') +rpcOptions :: HasField "rpcOptions" msg msg' => Lens msg msg' (Field "rpcOptions" msg) (Field "rpcOptions" msg') sessionInterOpThreadPool :: HasField "sessionInterOpThreadPool" msg msg' => Lens msg msg' (Field "sessionInterOpThreadPool" msg) (Field "sessionInterOpThreadPool" msg') stepStats :: HasField "stepStats" msg msg' => Lens msg msg' (Field "stepStats" msg) (Field "stepStats" msg') timelineStep :: HasField "timelineStep" msg msg' => Lens msg msg' (Field "timelineStep" msg) (Field "timelineStep" msg') timeoutInMs :: HasField "timeoutInMs" msg msg' => Lens msg msg' (Field "timeoutInMs" msg) (Field "timeoutInMs" msg') traceLevel :: HasField "traceLevel" msg msg' => Lens msg msg' (Field "traceLevel" msg) (Field "traceLevel" msg') usePerSessionThreads :: HasField "usePerSessionThreads" msg msg' => Lens msg msg' (Field "usePerSessionThreads" msg) (Field "usePerSessionThreads" msg') +useRpcForInprocessMaster :: HasField "useRpcForInprocessMaster" msg msg' => Lens msg msg' (Field "useRpcForInprocessMaster" msg) (Field "useRpcForInprocessMaster" msg') value :: HasField "value" msg msg' => Lens msg msg' (Field "value" msg) (Field "value" msg') visibleDeviceList :: HasField "visibleDeviceList" msg msg' => Lens msg msg' (Field "visibleDeviceList" msg) (Field "visibleDeviceList" msg') instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ConfigProto @@ -572,16 +821,18 @@ instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLe instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RunMetadata instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RunMetadata +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.RPCOptions +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.RPCOptions instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.GraphOptions instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.GraphOptions instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level 
instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel +instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.GPUOptions instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.GPUOptions -instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch -instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch instance GHC.Classes.Eq Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry instance GHC.Show.Show Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry instance Data.ProtoLens.Field.HasField "deviceCount" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto @@ -598,18 +849,14 @@ instance Data.ProtoLens.Field.HasField "logDevicePlacement" Proto.Tensorflow.Cor instance Data.ProtoLens.Field.HasField "graphOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto instance Data.ProtoLens.Field.HasField "maybe'graphOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto instance Data.ProtoLens.Field.HasField "operationTimeoutInMs" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "rpcOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto +instance Data.ProtoLens.Field.HasField "maybe'rpcOptions" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto Proto.Tensorflow.Core.Protobuf.Config.ConfigProto instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ConfigProto instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ConfigProto instance Data.ProtoLens.Field.HasField "key" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry instance Data.ProtoLens.Field.HasField "value" Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.ConfigProto'DeviceCountEntry -instance Data.ProtoLens.Field.HasField "nodeName" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch -instance Data.ProtoLens.Field.HasField "outputSlot" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch -instance Data.ProtoLens.Field.HasField "debugOps" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch -instance Data.ProtoLens.Field.HasField "debugUrls" Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch -instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch -instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.DebugTensorWatch instance Data.ProtoLens.Field.HasField "perProcessGpuMemoryFraction" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions 
instance Data.ProtoLens.Field.HasField "allocatorType" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions instance Data.ProtoLens.Field.HasField "deferredDeletionBytes" Proto.Tensorflow.Core.Protobuf.Config.GPUOptions Proto.Tensorflow.Core.Protobuf.Config.GPUOptions @@ -632,13 +879,22 @@ instance Data.ProtoLens.Field.HasField "doCommonSubexpressionElimination" Proto. instance Data.ProtoLens.Field.HasField "doConstantFolding" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions instance Data.ProtoLens.Field.HasField "doFunctionInlining" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions instance Data.ProtoLens.Field.HasField "optLevel" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.ProtoLens.Field.HasField "globalJitLevel" Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel +instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel +instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel +instance GHC.Enum.Enum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel +instance GHC.Enum.Bounded Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'GlobalJitLevel instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level instance Data.ProtoLens.Message.FieldDefault Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level instance Data.ProtoLens.Message.MessageEnum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level instance GHC.Enum.Enum Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level instance GHC.Enum.Bounded Proto.Tensorflow.Core.Protobuf.Config.OptimizerOptions'Level +instance Data.ProtoLens.Field.HasField "useRpcForInprocessMaster" Proto.Tensorflow.Core.Protobuf.Config.RPCOptions Proto.Tensorflow.Core.Protobuf.Config.RPCOptions +instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RPCOptions +instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.RPCOptions instance Data.ProtoLens.Field.HasField "stepStats" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata instance Data.ProtoLens.Field.HasField "maybe'stepStats" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata instance Data.ProtoLens.Field.HasField "costGraph" Proto.Tensorflow.Core.Protobuf.Config.RunMetadata Proto.Tensorflow.Core.Protobuf.Config.RunMetadata @@ -649,8 +905,9 @@ instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.Ru instance Data.ProtoLens.Field.HasField "traceLevel" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions instance Data.ProtoLens.Field.HasField "timeoutInMs" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions instance Data.ProtoLens.Field.HasField "interOpThreadPool" 
Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions -instance Data.ProtoLens.Field.HasField "debugTensorWatchOpts" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions instance Data.ProtoLens.Field.HasField "outputPartitionGraphs" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.ProtoLens.Field.HasField "debugOptions" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions +instance Data.ProtoLens.Field.HasField "maybe'debugOptions" Proto.Tensorflow.Core.Protobuf.Config.RunOptions Proto.Tensorflow.Core.Protobuf.Config.RunOptions instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunOptions instance Data.ProtoLens.Message.Message Proto.Tensorflow.Core.Protobuf.Config.RunOptions instance Data.Default.Class.Default Proto.Tensorflow.Core.Protobuf.Config.RunOptions'TraceLevel
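The Config module gains two notable knobs in this diff: RPCOptions (with useRpcForInprocessMaster) and the XLA globalJitLevel enum on OptimizerOptions. A hedged sketch of setting them, composing nested messages by updating def through the generated lenses (illustrative only, not tested code):

import Data.Default.Class (def)
import Lens.Family2 ((&), (.~))
import Proto.Tensorflow.Core.Protobuf.Config
    ( ConfigProto
    , OptimizerOptions'GlobalJitLevel (OptimizerOptions'ON_1)
    , allowSoftPlacement
    , globalJitLevel
    , graphOptions
    , optimizerOptions
    , rpcOptions
    , useRpcForInprocessMaster
    )

-- Soft placement on, in-process RPC on, XLA JIT at ON_1. Each nested
-- message starts from its own def and is written back whole.
config :: ConfigProto
config = def
    & allowSoftPlacement .~ True
    & rpcOptions .~ (def & useRpcForInprocessMaster .~ True)
    & graphOptions .~
        (def & optimizerOptions .~
            (def & globalJitLevel .~ OptimizerOptions'ON_1))

diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html b/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html index f8673b4..ab9d4e3 100644 --- a/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html +++ b/docs/haddock/tensorflow-queue-0.1.0.0/TensorFlow-Queue.html @@ -1,9 +1,8 @@ TensorFlow.Queue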

-    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.
-
-    Safe Haskell: None
-    Language: Haskell2010
-
-    TensorFlow.Queue
-
-    Description
-
-    Queues in TensorFlow graph. Very limited support for now.
-
-    Synopsis
-
-    Documentation
-
-    data Queue2 a b Source
-
-    A queue carrying tuples. The underlying structure is more
-    versatile and can be made to support arbitrary tuples.
-
-    makeQueue2 Source
-
-    Arguments
-
-    :: (TensorType a, TensorType b)
-    => Int64
-
-    The upper bound on the number of elements in this queue.
-    Negative numbers mean no limit.
-
-    -> ByteString
-
-    If non-empty, this queue will be shared under the given name
-    across multiple sessions.
-
-    -> Build (Queue2 a b)
-
-    Creates a new queue with the given capacity and shared name.
-
-    enqueue :: forall a b v1 v2. (TensorType a, TensorType b) => Queue2 a b -> Tensor v1 a -> Tensor v2 b -> Build ControlNode Source
-
-    Adds the given values to the queue.
-
-    dequeue Source
-
-    Arguments
-
-    :: (TensorType a, TensorType b)
-    => Queue2 a b
-    -> Build (Tensor Ref a, Tensor Ref b)
-
-    Dequeued tensors. They are paired in a sense that values appear
-    together, even if they are not consumed together.
-
-    Retrieves the values from the queue.
+
+    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.
+
+    Safe Haskell: None
+    Language: Haskell2010
+
+    TensorFlow.Queue
+
+    Description
+
+    Queues in TensorFlow graph. Very limited support for now.
+
+    Synopsis
+
+    Documentation
+
+    data Queue as
+
+    A queue carrying tuples.
+
+    makeQueue
+
+    Arguments
+
+    :: (MonadBuild m, TensorTypes as)
+    => Int64
+
+    The upper bound on the number of elements in this queue.
+    Negative numbers mean no limit.
+
+    -> ByteString
+
+    If non-empty, this queue will be shared under the given name
+    across multiple sessions.
+
+    -> m (Queue as)
+
+    Creates a new queue with the given capacity and shared name.
+
+    enqueue :: forall as v m. (MonadBuild m, TensorTypes as) => Queue as -> TensorList v as -> m ControlNode
+
+    Adds the given values to the queue.
+
+    dequeue
+
+    Arguments
+
+    :: (MonadBuild m, TensorTypes as)
+    => Queue as
+    -> m (TensorList Value as)
+
+    Dequeued tensors. They are coupled in a sense that values appear
+    together, even if they are not consumed together.
+
+    Retrieves the values from the queue.
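    To make the new list-typed signatures concrete, here is a minimal sketch. It assumes the heterogeneous-list constructors (:/) and Nil exported by TensorFlow.Types and the Build monad from TensorFlow.Build; it illustrates the types above rather than being tested code.

    {-# LANGUAGE DataKinds #-}
    {-# LANGUAGE OverloadedStrings #-}

    import Data.Int (Int64)
    import TensorFlow.Build (Build, ControlNode)
    import TensorFlow.Queue (Queue, enqueue, makeQueue)
    import TensorFlow.Tensor (Tensor)
    import TensorFlow.Types (ListOf (..))

    -- A capacity-100, unshared queue of (Int64, Float) pairs plus an op
    -- that enqueues one pair; the annotation pins the type-level list `as`.
    enqueuePair :: Tensor v Int64 -> Tensor v Float -> Build ControlNode
    enqueuePair a b = do
        q <- makeQueue 100 "" :: Build (Queue '[Int64, Float])
        enqueue q (a :/ b :/ Nil)

    Dequeuing goes the other way: dequeue q has type Build (TensorList Value '[Int64, Float]), and the components come back out by pattern matching on (:/) and Nil.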

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html index f9ac0fb..a841881 100644 --- a/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html +++ b/docs/haddock/tensorflow-queue-0.1.0.0/doc-index.html @@ -1,4 +1,4 @@ tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues. (Index)

    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

    \ No newline at end of file +

    tensorflow-queue-0.1.0.0: Basic access to TensorFlow queues.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html b/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html index 2e16ab1..ee5969c 100644 --- a/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html +++ b/docs/haddock/tensorflow-queue-0.1.0.0/mini_TensorFlow-Queue.html @@ -1,4 +1,4 @@ TensorFlow.Queue

    TensorFlow.Queue

    \ No newline at end of file +

    TensorFlow.Queue

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/src/TensorFlow-Queue.html b/docs/haddock/tensorflow-queue-0.1.0.0/src/TensorFlow-Queue.html deleted file mode 100644 index df19140..0000000 --- a/docs/haddock/tensorflow-queue-0.1.0.0/src/TensorFlow-Queue.html +++ /dev/null @@ -1,89 +0,0 @@ - - - - - -src/TensorFlow/Queue.hs - - - -
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE OverloadedStrings #-}
    -{-# LANGUAGE ScopedTypeVariables #-}
    -
    --- | Queues in TensorFlow graph. Very limited support for now.
    -module TensorFlow.Queue (Queue2, makeQueue2, enqueue, dequeue) where
    -
    -import Data.ByteString (ByteString)
    -import Data.Int (Int64)
    -import Lens.Family2 ((.~), (&))
    -import TensorFlow.Build (ControlNode, Build, addInitializer, opAttr, opDef)
    -import TensorFlow.BuildOp (buildOp)
    -import TensorFlow.ControlFlow (group)
    -import TensorFlow.Tensor (Ref, Tensor)
    -import TensorFlow.Types (TensorType, tensorType)
    -
    --- | A queue carrying tuples. The underlying structure is more
    --- versatile and can be made to support arbitrary tuples.
    -data Queue2 a b = Queue2 { handle :: Handle }
    -
    -type Handle = Tensor Ref ByteString
    -
    --- | Adds the given values to the queue.
    -enqueue :: forall a b v1 v2. (TensorType a, TensorType b)
    -           => Queue2 a b
    -           -> Tensor v1 a
    -           -> Tensor v2 b
    -           -> Build ControlNode
    -enqueue q =
    -    buildOp (opDef "QueueEnqueue"
    -             & opAttr "Tcomponents" .~ [ tensorType (undefined :: a)
    -                                       , tensorType (undefined :: b)])
    -    (handle q)
    -
    --- | Retrieves the values from the queue.
    -dequeue :: forall a b . (TensorType a, TensorType b)
    -           => Queue2 a b
    -           -> Build (Tensor Ref a, Tensor Ref b)
    -           -- ^ Dequeued tensors. They are paired in a sense
    -           -- that values appear together, even if they are
    -           -- not consumed together.
    -dequeue q =
    -    buildOp (opDef "QueueDequeue"
    -             & opAttr "component_types" .~ [ tensorType (undefined :: a)
    -                                           , tensorType (undefined :: b)])
    -    (handle q)
    -
    --- | Creates a new queue with the given capacity and shared name.
    -makeQueue2 :: forall a b . (TensorType a, TensorType b)
    -              => Int64  -- ^ The upper bound on the number of elements in
    -                        --  this queue. Negative numbers mean no limit.
    -              -> ByteString -- ^ If non-empty, this queue will be shared
    -                            -- under the given name across multiple sessions.
    -              -> Build (Queue2 a b)
    -makeQueue2 capacity sharedName = do
    -    q <- buildOp (opDef "FIFOQueue"
    -                     & opAttr "component_types" .~ [ tensorType (undefined :: a)
    -                                                   , tensorType (undefined :: b)]
    -                     & opAttr "shared_name" .~ sharedName
    -                     & opAttr "capacity" .~ capacity
    -                    )
    -    group q >>= addInitializer
    -    return (Queue2 q)
    -
    --- TODO(gnezdo): Figure out the closing story for queues.
    -
    - diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-queue-0.1.0.0/src/hscolour.css deleted file mode 100644 index c15919e..0000000 --- a/docs/haddock/tensorflow-queue-0.1.0.0/src/hscolour.css +++ /dev/null @@ -1,5 +0,0 @@ -.hs-keyglyph, .hs-layout {color: red;} -.hs-keyword {color: blue;} -.hs-comment, .hs-comment a {color: green;} -.hs-str, .hs-chr {color: teal;} -.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {} diff --git a/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt b/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt index 2db224c..1f71b85 100644 --- a/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt +++ b/docs/haddock/tensorflow-queue-0.1.0.0/tensorflow-queue.txt @@ -12,15 +12,14 @@ -- | Queues in TensorFlow graph. Very limited support for now. module TensorFlow.Queue --- | A queue carrying tuples. The underlying structure is more versatile --- and can be made to support arbitrary tuples. -data Queue2 a b +-- | A queue carrying tuples. +data Queue (as :: [*]) -- | Creates a new queue with the given capacity and shared name. -makeQueue2 :: (TensorType a, TensorType b) => Int64 -> ByteString -> Build (Queue2 a b) +makeQueue :: (MonadBuild m, TensorTypes as) => Int64 -> ByteString -> m (Queue as) -- | Adds the given values to the queue. -enqueue :: (TensorType a, TensorType b) => Queue2 a b -> Tensor v1 a -> Tensor v2 b -> Build ControlNode +enqueue :: (MonadBuild m, TensorTypes as) => Queue as -> TensorList v as -> m ControlNode -- | Retrieves the values from the queue. -dequeue :: (TensorType a, TensorType b) => Queue2 a b -> Build (Tensor Ref a, Tensor Ref b) +dequeue :: (MonadBuild m, TensorTypes as) => Queue as -> m (TensorList Value as) diff --git a/docs/haddock/tensorflow-records-0.1.0.0/TensorFlow-Records.html b/docs/haddock/tensorflow-records-0.1.0.0/TensorFlow-Records.html new file mode 100644 index 0000000..63effe4 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/TensorFlow-Records.html @@ -0,0 +1,6 @@ +TensorFlow.Records

    tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

    Safe Haskell: None
    Language: Haskell2010

    TensorFlow.Records

    Description

    Encoder and decoder for the TensorFlow "TFRecords" format.

    Records

    putTFRecord :: ByteString -> Put

    Put one TFRecord with the given contents.

    getTFRecord :: Get ByteString

    Parse one TFRecord.

    getTFRecords :: Get [ByteString]

    Parse many TFRecords as a list. Note you probably want streaming instead, as provided by the tensorflow-records-conduit package.

    Implementation

    These may be useful for encoding or decoding to types other than ByteString that have their own Cereal codecs.

    getTFRecordLength :: Get Word64

    Get a length and verify its checksum.

    getTFRecordData :: Word64 -> Get ByteString

    Get a record payload and verify its checksum.

    putTFRecordLength :: Word64 -> Put

    Put a record length and its checksum.

    putTFRecordData :: ByteString -> Put

    Put a record payload and its checksum.
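
    Because these are ordinary cereal Get/Put codecs, a framing round trip needs only the standard cereal runners. A small sketch, assuming just the functions documented above plus the cereal package:

    import qualified Data.ByteString as B
    import Data.Serialize (runGet, runPut)
    import TensorFlow.Records (getTFRecords, putTFRecord)

    -- Encode a batch of payloads into TFRecords framing and parse it back;
    -- on uncorrupted input this yields Right with the original payloads.
    roundTrip :: [B.ByteString] -> Either String [B.ByteString]
    roundTrip recs = runGet getTFRecords (runPut (mapM_ putTFRecord recs))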

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-records-0.1.0.0/doc-index.html new file mode 100644 index 0000000..a149abc --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format. (Index)

    tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

    Index

    getTFRecordTensorFlow.Records
    getTFRecordDataTensorFlow.Records
    getTFRecordLengthTensorFlow.Records
    getTFRecordsTensorFlow.Records
    putTFRecordTensorFlow.Records
    putTFRecordDataTensorFlow.Records
    putTFRecordLengthTensorFlow.Records
    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/frames.html b/docs/haddock/tensorflow-records-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-records-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-records-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+ parent.window.synopsis.location.replace(filename); + } else { + parent.window.synopsis.location = filename; + } + } +} + +function addMenuItem(html) { + var menu = document.getElementById("page-menu"); + if (menu) { + var btn = menu.firstChild.cloneNode(false); + btn.innerHTML = html; + menu.appendChild(btn); + } +} + +function adjustForFrames() { + var bodyCls; + + if (parent.location.href == window.location.href) { + // not in frames, so add Frames button + addMenuItem("Frames"); + bodyCls = "no-frame"; + } + else { + bodyCls = "in-frame"; + } + addClass(document.body, bodyCls); +} + +function reframe() { + setCookie("haddock-reframe", document.URL); + window.location = "frames.html"; +} + +function postReframe() { + var s = getCookie("haddock-reframe"); + if (s) { + parent.window.main.location = s; + clearCookie("haddock-reframe"); + } +} + +function styles() { + var i, a, es = document.getElementsByTagName("link"), rs = []; + for (i = 0; a = es[i]; i++) { + if(a.rel.indexOf("style") != -1 && a.title) { + rs.push(a); + } + } + return rs; +} + +function addStyleMenu() { + var as = styles(); + var i, a, btns = ""; + for(i=0; a = as[i]; i++) { + btns += "
  • " + + a.title + "
  • " + } + if (as.length > 1) { + var h = "
    " + + "Style ▾" + + "
      " + btns + "
    " + + "
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-records-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-records-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000..0ff8579 Binary files /dev/null and b/docs/haddock/tensorflow-records-0.1.0.0/hslogo-16.png differ diff --git a/docs/haddock/tensorflow-records-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-records-0.1.0.0/index-frames.html new file mode 100644 index 0000000..30dacc0 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/index-frames.html @@ -0,0 +1,4 @@ +tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format. \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/index.html b/docs/haddock/tensorflow-records-0.1.0.0/index.html new file mode 100644 index 0000000..88ed363 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

    tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

    tensorflow-records-0.1.0.0: Encoder and decoder for the TensorFlow \"TFRecords\" format.

    Encoder and decoder for the TensorFlow "TFRecords" format.

    Modules

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/mini_TensorFlow-Records.html b/docs/haddock/tensorflow-records-0.1.0.0/mini_TensorFlow-Records.html new file mode 100644 index 0000000..0dda493 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/mini_TensorFlow-Records.html @@ -0,0 +1,4 @@ +TensorFlow.Records

    TensorFlow.Records

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-0.1.0.0/minus.gif b/docs/haddock/tensorflow-records-0.1.0.0/minus.gif new file mode 100644 index 0000000..1deac2f Binary files /dev/null and b/docs/haddock/tensorflow-records-0.1.0.0/minus.gif differ diff --git a/docs/haddock/tensorflow-records-0.1.0.0/ocean.css b/docs/haddock/tensorflow-records-0.1.0.0/ocean.css new file mode 100644 index 0000000..1110b40 --- /dev/null +++ b/docs/haddock/tensorflow-records-0.1.0.0/ocean.css @@ -0,0 +1,600 @@ +/* @group Fundamentals */ + +* { margin: 0; padding: 0 } + +/* Is this portable? */ +html { + background-color: white; + width: 100%; + height: 100%; +} + +body { + background: white; + color: black; + text-align: left; + min-height: 100%; + position: relative; +} + +p { + margin: 0.8em 0; +} + +ul, ol { + margin: 0.8em 0 0.8em 2em; +} + +dl { + margin: 0.8em 0; +} + +dt { + font-weight: bold; +} +dd { + margin-left: 2em; +} + +a { text-decoration: none; } +a[href]:link { color: rgb(196,69,29); } +a[href]:visited { color: rgb(171,105,84); } +a[href]:hover { text-decoration:underline; } + +/* @end */ + +/* @group Fonts & Sizes */ + +/* Basic technique & IE workarounds from YUI 3 + For reasons, see: + http://yui.yahooapis.com/3.1.1/build/cssfonts/fonts.css + */ + +body { + font:13px/1.4 sans-serif; + *font-size:small; /* for IE */ + *font:x-small; /* for IE in quirks mode */ +} + +h1 { font-size: 146.5%; /* 19pt */ } +h2 { font-size: 131%; /* 17pt */ } +h3 { font-size: 116%; /* 15pt */ } +h4 { font-size: 100%; /* 13pt */ } +h5 { font-size: 100%; /* 13pt */ } + +select, input, button, textarea { + font:99% sans-serif; +} + +table { + font-size:inherit; + font:100%; +} + +pre, code, kbd, samp, tt, .src { + font-family:monospace; + *font-size:108%; + line-height: 124%; +} + +.links, .link { + font-size: 85%; /* 11pt */ +} + +#module-header .caption { + font-size: 182%; /* 24pt */ +} + +.info { + font-size: 85%; /* 11pt */ +} + +#table-of-contents, #synopsis { + /* font-size: 85%; /* 11pt */ +} + + +/* @end */ + +/* @group Common */ + +.caption, h1, h2, h3, h4, h5, h6 { + font-weight: bold; + color: rgb(78,98,114); + margin: 0.8em 0 0.4em; +} + +* + h1, * + h2, * + h3, * + h4, * + h5, * + h6 { + margin-top: 2em; +} + +h1 + h2, h2 + h3, h3 + h4, h4 + h5, h5 + h6 { + margin-top: inherit; +} + +ul.links { + list-style: none; + text-align: left; + float: right; + display: inline-table; + margin: 0 0 0 1em; +} + +ul.links li { + display: inline; + border-left: 1px solid #d5d5d5; + white-space: nowrap; + padding: 0; +} + +ul.links li a { + padding: 0.2em 0.5em; +} + +.hide { display: none; } +.show { display: inherit; } +.clear { clear: both; } + +.collapser { + background-image: url(minus.gif); + background-repeat: no-repeat; +} +.expander { + background-image: url(plus.gif); + background-repeat: no-repeat; +} +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} +.collapser, .expander { + padding-left: 14px; + margin-left: -14px; + cursor: pointer; +} + +pre { + padding: 0.25em; + margin: 0.8em 0; + background: rgb(229,237,244); + overflow: auto; + border-bottom: 0.25em solid white; + /* white border adds some space below the box to compensate + for visual extra space that paragraphs have between baseline + and the bounding box */ +} + +.src { + background: #f0f0f0; + padding: 0.2em 0.5em; +} + +.keyword { font-weight: normal; } +.def { font-weight: bold; } + + +/* @end */ + +/* @group Page Structure */ + +#content { + margin: 0 auto; + padding: 0 2em 6em; +} 
+ +#package-header { + background: rgb(41,56,69); + border-top: 5px solid rgb(78,98,114); + color: #ddd; + padding: 0.2em; + position: relative; + text-align: left; +} + +#package-header .caption { + background: url(hslogo-16.png) no-repeat 0em; + color: white; + margin: 0 2em; + font-weight: normal; + font-style: normal; + padding-left: 2em; +} + +#package-header a:link, #package-header a:visited { color: white; } +#package-header a:hover { background: rgb(78,98,114); } + +#module-header .caption { + color: rgb(78,98,114); + font-weight: bold; + border-bottom: 1px solid #ddd; +} + +table.info { + float: right; + padding: 0.5em 1em; + border: 1px solid #ddd; + color: rgb(78,98,114); + background-color: #fff; + max-width: 40%; + border-spacing: 0; + position: relative; + top: -0.5em; + margin: 0 0 0 2em; +} + +.info th { + padding: 0 1em 0 0; +} + +div#style-menu-holder { + position: relative; + z-index: 2; + display: inline; +} + +#style-menu { + position: absolute; + z-index: 1; + overflow: visible; + background: #374c5e; + margin: 0; + text-align: center; + right: 0; + padding: 0; + top: 1.25em; +} + +#style-menu li { + display: list-item; + border-style: none; + margin: 0; + padding: 0; + color: #000; + list-style-type: none; +} + +#style-menu li + li { + border-top: 1px solid #919191; +} + +#style-menu a { + width: 6em; + padding: 3px; + display: block; +} + +#footer { + background: #ddd; + border-top: 1px solid #aaa; + padding: 0.5em 0; + color: #666; + text-align: center; + position: absolute; + bottom: 0; + width: 100%; + height: 3em; +} + +/* @end */ + +/* @group Front Matter */ + +#table-of-contents { + float: right; + clear: right; + background: #faf9dc; + border: 1px solid #d8d7ad; + padding: 0.5em 1em; + max-width: 20em; + margin: 0.5em 0 1em 1em; +} + +#table-of-contents .caption { + text-align: center; + margin: 0; +} + +#table-of-contents ul { + list-style: none; + margin: 0; +} + +#table-of-contents ul ul { + margin-left: 2em; +} + +#description .caption { + display: none; +} + +#synopsis { + display: none; +} + +.no-frame #synopsis { + display: block; + position: fixed; + right: 0; + height: 80%; + top: 10%; + padding: 0; + max-width: 75%; +} + +#synopsis .caption { + float: left; + width: 29px; + color: rgba(255,255,255,0); + height: 110px; + margin: 0; + font-size: 1px; + padding: 0; +} + +#synopsis p.caption.collapser { + background: url(synopsis.png) no-repeat -64px -8px; +} + +#synopsis p.caption.expander { + background: url(synopsis.png) no-repeat 0px -8px; +} + +#synopsis ul { + height: 100%; + overflow: auto; + padding: 0.5em; + margin: 0; +} + +#synopsis ul ul { + overflow: hidden; +} + +#synopsis ul, +#synopsis ul li.src { + background-color: #faf9dc; + white-space: nowrap; + list-style: none; + margin-left: 0; +} + +/* @end */ + +/* @group Main Content */ + +#interface div.top { margin: 2em 0; } +#interface h1 + div.top, +#interface h2 + div.top, +#interface h3 + div.top, +#interface h4 + div.top, +#interface h5 + div.top { + margin-top: 1em; +} +#interface p.src .link { + float: right; + color: #919191; + border-left: 1px solid #919191; + background: #f0f0f0; + padding: 0 0.5em 0.2em; + margin: 0 -0.5em 0 0.5em; +} + +#interface td.src .link { + float: right; + color: #919191; + border-left: 1px solid #919191; + background: #f0f0f0; + padding: 0 0.5em 0.2em; + margin: 0 -0.5em 0 0.5em; +} + +#interface span.fixity { + color: #919191; + border-left: 1px solid #919191; + padding: 0.2em 0.5em 0.2em 0.5em; + margin: 0 -1em 0 1em; +} + +#interface span.rightedge { + 
border-left: 1px solid #919191; + padding: 0.2em 0 0.2em 0; + margin: 0 0 0 1em; +} + +#interface table { border-spacing: 2px; } +#interface td { + vertical-align: top; + padding-left: 0.5em; +} +#interface td.src { + white-space: nowrap; +} +#interface td.doc p { + margin: 0; +} +#interface td.doc p + p { + margin-top: 0.8em; +} + +.clearfix:after { + clear: both; + content: " "; + display: block; + height: 0; + visibility: hidden; +} + +.subs dl { + margin: 0; +} + +.subs dt { + float: left; + clear: left; + display: block; + margin: 1px 0; +} + +.subs dd { + float: right; + width: 90%; + display: block; + padding-left: 0.5em; + margin-bottom: 0.5em; +} + +.subs dd.empty { + display: none; +} + +.subs dd p { + margin: 0; +} + +/* Render short-style data instances */ +.inst ul { + height: 100%; + padding: 0.5em; + margin: 0; +} + +.inst, .inst li { + list-style: none; + margin-left: 1em; +} + +/* Workaround for bug in Firefox (issue #384) */ +.inst-left { + float: left; +} + +.top p.src { + border-top: 1px solid #ccc; +} + +.subs, .doc { + /* use this selector for one level of indent */ + padding-left: 2em; +} + +.warning { + color: red; +} + +.arguments { + margin-top: -0.4em; +} +.arguments .caption { + display: none; +} + +.fields { padding-left: 1em; } + +.fields .caption { display: none; } + +.fields p { margin: 0 0; } + +/* this seems bulky to me +.methods, .constructors { + background: #f8f8f8; + border: 1px solid #eee; +} +*/ + +/* @end */ + +/* @group Auxillary Pages */ + + +.extension-list { + list-style-type: none; + margin-left: 0; +} + +#mini { + margin: 0 auto; + padding: 0 1em 1em; +} + +#mini > * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-records-0.1.0.0/plus.gif b/docs/haddock/tensorflow-records-0.1.0.0/plus.gif new file mode 100644 index 0000000..2d15c14 Binary files /dev/null and b/docs/haddock/tensorflow-records-0.1.0.0/plus.gif differ diff --git a/docs/haddock/tensorflow-records-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-records-0.1.0.0/synopsis.png new file mode 100644 index 0000000..85fb86e Binary files /dev/null and b/docs/haddock/tensorflow-records-0.1.0.0/synopsis.png differ diff --git a/docs/haddock/tensorflow-records-0.1.0.0/tensorflow-records.txt b/docs/haddock/tensorflow-records-0.1.0.0/tensorflow-records.txt new file mode 100644 index 0000000..6b34367 --- /dev/null +++ 
b/docs/haddock/tensorflow-records-0.1.0.0/tensorflow-records.txt @@ -0,0 +1,35 @@ +-- Hoogle documentation, generated by Haddock +-- See Hoogle, http://www.haskell.org/hoogle/ + + +-- | Encoder and decoder for the TensorFlow \"TFRecords\" format. +-- +-- Encoder and decoder for the TensorFlow "TFRecords" format. +@package tensorflow-records +@version 0.1.0.0 + + +-- | Encoder and decoder for the TensorFlow "TFRecords" format. +module TensorFlow.Records + +-- | Put one TFRecord with the given contents. +putTFRecord :: ByteString -> Put + +-- | Parse one TFRecord. +getTFRecord :: Get ByteString + +-- | Parse many TFRecords as a list. Note you probably want streaming +-- instead as provided by the tensorflow-records-conduit package. +getTFRecords :: Get [ByteString] + +-- | Get a length and verify its checksum. +getTFRecordLength :: Get Word64 + +-- | Get a record payload and verify its checksum. +getTFRecordData :: Word64 -> Get ByteString + +-- | Put a record length and its checksum. +putTFRecordLength :: Word64 -> Put + +-- | Put a record payload and its checksum. +putTFRecordData :: ByteString -> Put diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/TensorFlow-Records-Conduit.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/TensorFlow-Records-Conduit.html new file mode 100644 index 0000000..c870056 --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/TensorFlow-Records-Conduit.html @@ -0,0 +1,4 @@ +TensorFlow.Records.Conduit

    tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

    Safe Haskell: None
    Language: Haskell2010

    TensorFlow.Records.Conduit

    Description

    Conduit wrappers for TensorFlow.Records.

    Synopsis

    Encode/Decode

    encodeTFRecords :: Monad m => Conduit ByteString m ByteString

    Encode TFRecords to a stream of bytes.

    decodeTFRecords :: MonadThrow m => Conduit ByteString m ByteString

    Decode TFRecords from a stream of bytes.

    Source/Sink

    sinkTFRecords :: MonadResource m => FilePath -> Consumer ByteString m ()

    Write TFRecords to a file.

    sourceTFRecords :: (MonadResource m, MonadThrow m) => FilePath -> Producer m ByteString

    Read TFRecords from a file.
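    As a sketch of how these pieces compose (hypothetical paths; assumes the conduit and resourcet packages of the same era, where sources and sinks are joined with $$ and run under runResourceT):

    import Control.Monad.Trans.Resource (runResourceT)
    import Data.Conduit (($$))
    import TensorFlow.Records.Conduit (sinkTFRecords, sourceTFRecords)

    -- Stream every record from one TFRecords file into another,
    -- decoding each framed payload and re-encoding it on the way out.
    copyRecords :: FilePath -> FilePath -> IO ()
    copyRecords inPath outPath =
        runResourceT $ sourceTFRecords inPath $$ sinkTFRecords outPath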

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/doc-index.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/doc-index.html new file mode 100644 index 0000000..329b86b --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/doc-index.html @@ -0,0 +1,4 @@ +tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records. (Index)

    tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/frames.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/frames.html new file mode 100644 index 0000000..1b4e38d --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/frames.html @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/haddock-util.js b/docs/haddock/tensorflow-records-conduit-0.1.0.0/haddock-util.js new file mode 100644 index 0000000..9a6fccf --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/haddock-util.js @@ -0,0 +1,344 @@ +// Haddock JavaScript utilities + +var rspace = /\s\s+/g, + rtrim = /^\s+|\s+$/g; + +function spaced(s) { return (" " + s + " ").replace(rspace, " "); } +function trim(s) { return s.replace(rtrim, ""); } + +function hasClass(elem, value) { + var className = spaced(elem.className || ""); + return className.indexOf( " " + value + " " ) >= 0; +} + +function addClass(elem, value) { + var className = spaced(elem.className || ""); + if ( className.indexOf( " " + value + " " ) < 0 ) { + elem.className = trim(className + " " + value); + } +} + +function removeClass(elem, value) { + var className = spaced(elem.className || ""); + className = className.replace(" " + value + " ", " "); + elem.className = trim(className); +} + +function toggleClass(elem, valueOn, valueOff, bool) { + if (bool == null) { bool = ! hasClass(elem, valueOn); } + if (bool) { + removeClass(elem, valueOff); + addClass(elem, valueOn); + } + else { + removeClass(elem, valueOn); + addClass(elem, valueOff); + } + return bool; +} + + +function makeClassToggle(valueOn, valueOff) +{ + return function(elem, bool) { + return toggleClass(elem, valueOn, valueOff, bool); + } +} + +toggleShow = makeClassToggle("show", "hide"); +toggleCollapser = makeClassToggle("collapser", "expander"); + +function toggleSection(id) +{ + var b = toggleShow(document.getElementById("section." + id)); + toggleCollapser(document.getElementById("control." + id), b); + rememberCollapsed(id, b); + return b; +} + +var collapsed = {}; +function rememberCollapsed(id, b) +{ + if(b) + delete collapsed[id] + else + collapsed[id] = null; + + var sections = []; + for(var i in collapsed) + { + if(collapsed.hasOwnProperty(i)) + sections.push(i); + } + // cookie specific to this page; don't use setCookie which sets path=/ + document.cookie = "collapsed=" + escape(sections.join('+')); +} + +function restoreCollapsed() +{ + var cookie = getCookie("collapsed"); + if(!cookie) + return; + + var ids = cookie.split('+'); + for(var i in ids) + { + if(document.getElementById("section." 
+ ids[i])) + toggleSection(ids[i]); + } +} + +function setCookie(name, value) { + document.cookie = name + "=" + escape(value) + ";path=/;"; +} + +function clearCookie(name) { + document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; +} + +function getCookie(name) { + var nameEQ = name + "="; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) { + return unescape(c.substring(nameEQ.length,c.length)); + } + } + return null; +} + + + +var max_results = 75; // 50 is not enough to search for map in the base libraries +var shown_range = null; +var last_search = null; + +function quick_search() +{ + perform_search(false); +} + +function full_search() +{ + perform_search(true); +} + + +function perform_search(full) +{ + var text = document.getElementById("searchbox").value.toLowerCase(); + if (text == last_search && !full) return; + last_search = text; + + var table = document.getElementById("indexlist"); + var status = document.getElementById("searchmsg"); + var children = table.firstChild.childNodes; + + // first figure out the first node with the prefix + var first = bisect(-1); + var last = (first == -1 ? -1 : bisect(1)); + + if (first == -1) + { + table.className = ""; + status.innerHTML = "No results found, displaying all"; + } + else if (first == 0 && last == children.length - 1) + { + table.className = ""; + status.innerHTML = ""; + } + else if (last - first >= max_results && !full) + { + table.className = ""; + status.innerHTML = "More than " + max_results + ", press Search to display"; + } + else + { + // decide what you need to clear/show + if (shown_range) + setclass(shown_range[0], shown_range[1], "indexrow"); + setclass(first, last, "indexshow"); + shown_range = [first, last]; + table.className = "indexsearch"; + status.innerHTML = ""; + } + + + function setclass(first, last, status) + { + for (var i = first; i <= last; i++) + { + children[i].className = status; + } + } + + + // do a binary search, treating 0 as ... + // return either -1 (no 0's found) or location of most far match + function bisect(dir) + { + var first = 0, finish = children.length - 1; + var mid, success = false; + + while (finish - first > 3) + { + mid = Math.floor((finish + first) / 2); + + var i = checkitem(mid); + if (i == 0) i = dir; + if (i == -1) + finish = mid; + else + first = mid; + } + var a = (dir == 1 ? first : finish); + var b = (dir == 1 ? finish : first); + for (var i = b; i != a - dir; i -= dir) + { + if (checkitem(i) == 0) return i; + } + return -1; + } + + + // from an index, decide what the result is + // 0 = match, -1 is lower, 1 is higher + function checkitem(i) + { + var s = getitem(i).toLowerCase().substr(0, text.length); + if (s == text) return 0; + else return (s > text ? -1 : 1); + } + + + // from an index, get its string + // this abstracts over alternates + function getitem(i) + { + for ( ; i >= 0; i--) + { + var s = children[i].firstChild.firstChild.data; + if (s.indexOf(' ') == -1) + return s; + } + return ""; // should never be reached + } +} + +function setSynopsis(filename) { + if (parent.window.synopsis) { + if (parent.window.synopsis.location.replace) { + // In Firefox this avoids adding the change to the history. 
+ parent.window.synopsis.location.replace(filename); + } else { + parent.window.synopsis.location = filename; + } + } +} + +function addMenuItem(html) { + var menu = document.getElementById("page-menu"); + if (menu) { + var btn = menu.firstChild.cloneNode(false); + btn.innerHTML = html; + menu.appendChild(btn); + } +} + +function adjustForFrames() { + var bodyCls; + + if (parent.location.href == window.location.href) { + // not in frames, so add Frames button + addMenuItem("Frames"); + bodyCls = "no-frame"; + } + else { + bodyCls = "in-frame"; + } + addClass(document.body, bodyCls); +} + +function reframe() { + setCookie("haddock-reframe", document.URL); + window.location = "frames.html"; +} + +function postReframe() { + var s = getCookie("haddock-reframe"); + if (s) { + parent.window.main.location = s; + clearCookie("haddock-reframe"); + } +} + +function styles() { + var i, a, es = document.getElementsByTagName("link"), rs = []; + for (i = 0; a = es[i]; i++) { + if(a.rel.indexOf("style") != -1 && a.title) { + rs.push(a); + } + } + return rs; +} + +function addStyleMenu() { + var as = styles(); + var i, a, btns = ""; + for(i=0; a = as[i]; i++) { + btns += "
  • " + + a.title + "
  • " + } + if (as.length > 1) { + var h = "
    " + + "Style ▾" + + "
      " + btns + "
    " + + "
    "; + addMenuItem(h); + } +} + +function setActiveStyleSheet(title) { + var as = styles(); + var i, a, found; + for(i=0; a = as[i]; i++) { + a.disabled = true; + // need to do this always, some browsers are edge triggered + if(a.title == title) { + found = a; + } + } + if (found) { + found.disabled = false; + setCookie("haddock-style", title); + } + else { + as[0].disabled = false; + clearCookie("haddock-style"); + } + styleMenu(false); +} + +function resetStyle() { + var s = getCookie("haddock-style"); + if (s) setActiveStyleSheet(s); +} + + +function styleMenu(show) { + var m = document.getElementById('style-menu'); + if (m) toggleShow(m, show); +} + + +function pageLoad() { + addStyleMenu(); + adjustForFrames(); + resetStyle(); + restoreCollapsed(); +} + diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/hslogo-16.png b/docs/haddock/tensorflow-records-conduit-0.1.0.0/hslogo-16.png new file mode 100644 index 0000000..0ff8579 Binary files /dev/null and b/docs/haddock/tensorflow-records-conduit-0.1.0.0/hslogo-16.png differ diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/index-frames.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/index-frames.html new file mode 100644 index 0000000..758a8f3 --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/index-frames.html @@ -0,0 +1,4 @@ +tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records. \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/index.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/index.html new file mode 100644 index 0000000..1fc992b --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/index.html @@ -0,0 +1,4 @@ +tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

    tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

    tensorflow-records-conduit-0.1.0.0: Conduit wrappers for TensorFlow.Records.

    Conduit wrappers for TensorFlow.Records.

    Modules

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/mini_TensorFlow-Records-Conduit.html b/docs/haddock/tensorflow-records-conduit-0.1.0.0/mini_TensorFlow-Records-Conduit.html new file mode 100644 index 0000000..5fc00bb --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/mini_TensorFlow-Records-Conduit.html @@ -0,0 +1,4 @@ +TensorFlow.Records.Conduit

    TensorFlow.Records.Conduit

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/minus.gif b/docs/haddock/tensorflow-records-conduit-0.1.0.0/minus.gif new file mode 100644 index 0000000..1deac2f Binary files /dev/null and b/docs/haddock/tensorflow-records-conduit-0.1.0.0/minus.gif differ diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/ocean.css b/docs/haddock/tensorflow-records-conduit-0.1.0.0/ocean.css new file mode 100644 index 0000000..1110b40 --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/ocean.css @@ -0,0 +1,600 @@ +/* @group Fundamentals */ + +* { margin: 0; padding: 0 } + +/* Is this portable? */ +html { + background-color: white; + width: 100%; + height: 100%; +} + +body { + background: white; + color: black; + text-align: left; + min-height: 100%; + position: relative; +} + +p { + margin: 0.8em 0; +} + +ul, ol { + margin: 0.8em 0 0.8em 2em; +} + +dl { + margin: 0.8em 0; +} + +dt { + font-weight: bold; +} +dd { + margin-left: 2em; +} + +a { text-decoration: none; } +a[href]:link { color: rgb(196,69,29); } +a[href]:visited { color: rgb(171,105,84); } +a[href]:hover { text-decoration:underline; } + +/* @end */ + +/* @group Fonts & Sizes */ + +/* Basic technique & IE workarounds from YUI 3 + For reasons, see: + http://yui.yahooapis.com/3.1.1/build/cssfonts/fonts.css + */ + +body { + font:13px/1.4 sans-serif; + *font-size:small; /* for IE */ + *font:x-small; /* for IE in quirks mode */ +} + +h1 { font-size: 146.5%; /* 19pt */ } +h2 { font-size: 131%; /* 17pt */ } +h3 { font-size: 116%; /* 15pt */ } +h4 { font-size: 100%; /* 13pt */ } +h5 { font-size: 100%; /* 13pt */ } + +select, input, button, textarea { + font:99% sans-serif; +} + +table { + font-size:inherit; + font:100%; +} + +pre, code, kbd, samp, tt, .src { + font-family:monospace; + *font-size:108%; + line-height: 124%; +} + +.links, .link { + font-size: 85%; /* 11pt */ +} + +#module-header .caption { + font-size: 182%; /* 24pt */ +} + +.info { + font-size: 85%; /* 11pt */ +} + +#table-of-contents, #synopsis { + /* font-size: 85%; /* 11pt */ +} + + +/* @end */ + +/* @group Common */ + +.caption, h1, h2, h3, h4, h5, h6 { + font-weight: bold; + color: rgb(78,98,114); + margin: 0.8em 0 0.4em; +} + +* + h1, * + h2, * + h3, * + h4, * + h5, * + h6 { + margin-top: 2em; +} + +h1 + h2, h2 + h3, h3 + h4, h4 + h5, h5 + h6 { + margin-top: inherit; +} + +ul.links { + list-style: none; + text-align: left; + float: right; + display: inline-table; + margin: 0 0 0 1em; +} + +ul.links li { + display: inline; + border-left: 1px solid #d5d5d5; + white-space: nowrap; + padding: 0; +} + +ul.links li a { + padding: 0.2em 0.5em; +} + +.hide { display: none; } +.show { display: inherit; } +.clear { clear: both; } + +.collapser { + background-image: url(minus.gif); + background-repeat: no-repeat; +} +.expander { + background-image: url(plus.gif); + background-repeat: no-repeat; +} +p.caption.collapser, +p.caption.expander { + background-position: 0 0.4em; +} +.collapser, .expander { + padding-left: 14px; + margin-left: -14px; + cursor: pointer; +} + +pre { + padding: 0.25em; + margin: 0.8em 0; + background: rgb(229,237,244); + overflow: auto; + border-bottom: 0.25em solid white; + /* white border adds some space below the box to compensate + for visual extra space that paragraphs have between baseline + and the bounding box */ +} + +.src { + background: #f0f0f0; + padding: 0.2em 0.5em; +} + +.keyword { font-weight: normal; } +.def { font-weight: bold; } + + +/* @end */ + +/* @group Page Structure */ + 
+#content { + margin: 0 auto; + padding: 0 2em 6em; +} + +#package-header { + background: rgb(41,56,69); + border-top: 5px solid rgb(78,98,114); + color: #ddd; + padding: 0.2em; + position: relative; + text-align: left; +} + +#package-header .caption { + background: url(hslogo-16.png) no-repeat 0em; + color: white; + margin: 0 2em; + font-weight: normal; + font-style: normal; + padding-left: 2em; +} + +#package-header a:link, #package-header a:visited { color: white; } +#package-header a:hover { background: rgb(78,98,114); } + +#module-header .caption { + color: rgb(78,98,114); + font-weight: bold; + border-bottom: 1px solid #ddd; +} + +table.info { + float: right; + padding: 0.5em 1em; + border: 1px solid #ddd; + color: rgb(78,98,114); + background-color: #fff; + max-width: 40%; + border-spacing: 0; + position: relative; + top: -0.5em; + margin: 0 0 0 2em; +} + +.info th { + padding: 0 1em 0 0; +} + +div#style-menu-holder { + position: relative; + z-index: 2; + display: inline; +} + +#style-menu { + position: absolute; + z-index: 1; + overflow: visible; + background: #374c5e; + margin: 0; + text-align: center; + right: 0; + padding: 0; + top: 1.25em; +} + +#style-menu li { + display: list-item; + border-style: none; + margin: 0; + padding: 0; + color: #000; + list-style-type: none; +} + +#style-menu li + li { + border-top: 1px solid #919191; +} + +#style-menu a { + width: 6em; + padding: 3px; + display: block; +} + +#footer { + background: #ddd; + border-top: 1px solid #aaa; + padding: 0.5em 0; + color: #666; + text-align: center; + position: absolute; + bottom: 0; + width: 100%; + height: 3em; +} + +/* @end */ + +/* @group Front Matter */ + +#table-of-contents { + float: right; + clear: right; + background: #faf9dc; + border: 1px solid #d8d7ad; + padding: 0.5em 1em; + max-width: 20em; + margin: 0.5em 0 1em 1em; +} + +#table-of-contents .caption { + text-align: center; + margin: 0; +} + +#table-of-contents ul { + list-style: none; + margin: 0; +} + +#table-of-contents ul ul { + margin-left: 2em; +} + +#description .caption { + display: none; +} + +#synopsis { + display: none; +} + +.no-frame #synopsis { + display: block; + position: fixed; + right: 0; + height: 80%; + top: 10%; + padding: 0; + max-width: 75%; +} + +#synopsis .caption { + float: left; + width: 29px; + color: rgba(255,255,255,0); + height: 110px; + margin: 0; + font-size: 1px; + padding: 0; +} + +#synopsis p.caption.collapser { + background: url(synopsis.png) no-repeat -64px -8px; +} + +#synopsis p.caption.expander { + background: url(synopsis.png) no-repeat 0px -8px; +} + +#synopsis ul { + height: 100%; + overflow: auto; + padding: 0.5em; + margin: 0; +} + +#synopsis ul ul { + overflow: hidden; +} + +#synopsis ul, +#synopsis ul li.src { + background-color: #faf9dc; + white-space: nowrap; + list-style: none; + margin-left: 0; +} + +/* @end */ + +/* @group Main Content */ + +#interface div.top { margin: 2em 0; } +#interface h1 + div.top, +#interface h2 + div.top, +#interface h3 + div.top, +#interface h4 + div.top, +#interface h5 + div.top { + margin-top: 1em; +} +#interface p.src .link { + float: right; + color: #919191; + border-left: 1px solid #919191; + background: #f0f0f0; + padding: 0 0.5em 0.2em; + margin: 0 -0.5em 0 0.5em; +} + +#interface td.src .link { + float: right; + color: #919191; + border-left: 1px solid #919191; + background: #f0f0f0; + padding: 0 0.5em 0.2em; + margin: 0 -0.5em 0 0.5em; +} + +#interface span.fixity { + color: #919191; + border-left: 1px solid #919191; + padding: 0.2em 0.5em 0.2em 0.5em; + 
margin: 0 -1em 0 1em; +} + +#interface span.rightedge { + border-left: 1px solid #919191; + padding: 0.2em 0 0.2em 0; + margin: 0 0 0 1em; +} + +#interface table { border-spacing: 2px; } +#interface td { + vertical-align: top; + padding-left: 0.5em; +} +#interface td.src { + white-space: nowrap; +} +#interface td.doc p { + margin: 0; +} +#interface td.doc p + p { + margin-top: 0.8em; +} + +.clearfix:after { + clear: both; + content: " "; + display: block; + height: 0; + visibility: hidden; +} + +.subs dl { + margin: 0; +} + +.subs dt { + float: left; + clear: left; + display: block; + margin: 1px 0; +} + +.subs dd { + float: right; + width: 90%; + display: block; + padding-left: 0.5em; + margin-bottom: 0.5em; +} + +.subs dd.empty { + display: none; +} + +.subs dd p { + margin: 0; +} + +/* Render short-style data instances */ +.inst ul { + height: 100%; + padding: 0.5em; + margin: 0; +} + +.inst, .inst li { + list-style: none; + margin-left: 1em; +} + +/* Workaround for bug in Firefox (issue #384) */ +.inst-left { + float: left; +} + +.top p.src { + border-top: 1px solid #ccc; +} + +.subs, .doc { + /* use this selector for one level of indent */ + padding-left: 2em; +} + +.warning { + color: red; +} + +.arguments { + margin-top: -0.4em; +} +.arguments .caption { + display: none; +} + +.fields { padding-left: 1em; } + +.fields .caption { display: none; } + +.fields p { margin: 0 0; } + +/* this seems bulky to me +.methods, .constructors { + background: #f8f8f8; + border: 1px solid #eee; +} +*/ + +/* @end */ + +/* @group Auxillary Pages */ + + +.extension-list { + list-style-type: none; + margin-left: 0; +} + +#mini { + margin: 0 auto; + padding: 0 1em 1em; +} + +#mini > * { + font-size: 93%; /* 12pt */ +} + +#mini #module-list .caption, +#mini #module-header .caption { + font-size: 125%; /* 15pt */ +} + +#mini #interface h1, +#mini #interface h2, +#mini #interface h3, +#mini #interface h4 { + font-size: 109%; /* 13pt */ + margin: 1em 0 0; +} + +#mini #interface .top, +#mini #interface .src { + margin: 0; +} + +#mini #module-list ul { + list-style: none; + margin: 0; +} + +#alphabet ul { + list-style: none; + padding: 0; + margin: 0.5em 0 0; + text-align: center; +} + +#alphabet li { + display: inline; + margin: 0 0.25em; +} + +#alphabet a { + font-weight: bold; +} + +#index .caption, +#module-list .caption { font-size: 131%; /* 17pt */ } + +#index table { + margin-left: 2em; +} + +#index .src { + font-weight: bold; +} +#index .alt { + font-size: 77%; /* 10pt */ + font-style: italic; + padding-left: 2em; +} + +#index td + td { + padding-left: 1em; +} + +#module-list ul { + list-style: none; + margin: 0 0 0 2em; +} + +#module-list li { + clear: right; +} + +#module-list span.collapser, +#module-list span.expander { + background-position: 0 0.3em; +} + +#module-list .package { + float: right; +} + +/* @end */ diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/plus.gif b/docs/haddock/tensorflow-records-conduit-0.1.0.0/plus.gif new file mode 100644 index 0000000..2d15c14 Binary files /dev/null and b/docs/haddock/tensorflow-records-conduit-0.1.0.0/plus.gif differ diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/synopsis.png b/docs/haddock/tensorflow-records-conduit-0.1.0.0/synopsis.png new file mode 100644 index 0000000..85fb86e Binary files /dev/null and b/docs/haddock/tensorflow-records-conduit-0.1.0.0/synopsis.png differ diff --git a/docs/haddock/tensorflow-records-conduit-0.1.0.0/tensorflow-records-conduit.txt 
b/docs/haddock/tensorflow-records-conduit-0.1.0.0/tensorflow-records-conduit.txt new file mode 100644 index 0000000..8344fa9 --- /dev/null +++ b/docs/haddock/tensorflow-records-conduit-0.1.0.0/tensorflow-records-conduit.txt @@ -0,0 +1,25 @@ +-- Hoogle documentation, generated by Haddock +-- See Hoogle, http://www.haskell.org/hoogle/ + + +-- | Conduit wrappers for TensorFlow.Records. +-- +-- Conduit wrappers for TensorFlow.Records. +@package tensorflow-records-conduit +@version 0.1.0.0 + + +-- | Conduit wrappers for TensorFlow.Records. +module TensorFlow.Records.Conduit + +-- | Encode TFRecords to a stream of bytes. +encodeTFRecords :: Monad m => Conduit ByteString m ByteString + +-- | Decode TFRecords from a stream of bytes. +decodeTFRecords :: MonadThrow m => Conduit ByteString m ByteString + +-- | Write TFRecords to a file. +sinkTFRecords :: (MonadResource m) => FilePath -> Consumer ByteString m () + +-- | Read TFRecords from a file. +sourceTFRecords :: (MonadResource m, MonadThrow m) => FilePath -> Producer m ByteString diff --git a/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html b/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html index badc48c..4205a18 100644 --- a/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html +++ b/docs/haddock/tensorflow-test-0.1.0.0/TensorFlow-Test.html @@ -1,5 +1,5 @@ TensorFlow.Test

    tensorflow-test-0.1.0.0: Some common functions for test suites.

    Safe Haskell: None
    Language: Haskell2010

    TensorFlow.Test

    Documentation

    assertAllClose :: Vector Float -> Vector Float -> Assertion

    Compares that the vectors are element-by-element equal within the given +

    tensorflow-test-0.1.0.0: Some common functions for test suites.

    Safe Haskell: None
    Language: Haskell2010

    TensorFlow.Test

    Synopsis

    Documentation

    assertAllClose :: Vector Float -> Vector Float -> Assertion

    Asserts that the vectors are element-by-element equal within the given tolerance. Raises an assertion failure and prints some information if not.
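    A typical use in an HUnit test case (sketch; the vectors here differ element-wise by at most 0.0005, under the 0.001 tolerance the implementation uses):

    import qualified Data.Vector as V
    import Test.HUnit (Test (TestCase), runTestTT)
    import TensorFlow.Test (assertAllClose)

    main :: IO ()
    main = do
        -- Passes: every element-wise difference is within tolerance.
        _ <- runTestTT $ TestCase $
            assertAllClose (V.fromList [1.0, 2.0]) (V.fromList [1.0005, 2.0])
        return ()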

    \ No newline at end of file diff --git a/docs/haddock/tensorflow-test-0.1.0.0/src/TensorFlow-Test.html b/docs/haddock/tensorflow-test-0.1.0.0/src/TensorFlow-Test.html deleted file mode 100644 index 1de2856..0000000 --- a/docs/haddock/tensorflow-test-0.1.0.0/src/TensorFlow-Test.html +++ /dev/null @@ -1,43 +0,0 @@ - - - - - -src/TensorFlow/Test.hs - - - -
    -- Copyright 2016 TensorFlow authors.
    ---
    --- Licensed under the Apache License, Version 2.0 (the "License");
    --- you may not use this file except in compliance with the License.
    --- You may obtain a copy of the License at
    ---
    ---     http://www.apache.org/licenses/LICENSE-2.0
    ---
    --- Unless required by applicable law or agreed to in writing, software
    --- distributed under the License is distributed on an "AS IS" BASIS,
    --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    --- See the License for the specific language governing permissions and
    --- limitations under the License.
    -
    -{-# LANGUAGE OverloadedStrings #-}
    -
    -module TensorFlow.Test
    -    ( assertAllClose
    -    ) where
    -
    -import qualified Data.Vector as V
    -import Test.HUnit ((@?))
    -import Test.HUnit.Lang (Assertion)
    --- | Compares that the vectors are element-by-element equal within the given
    --- tolerance. Raises an assertion and prints some information if not.
    -assertAllClose :: V.Vector Float -> V.Vector Float -> Assertion
    -assertAllClose xs ys = all (<= tol) (V.zipWith absDiff xs ys) @?
    -    "Difference > tolerance: \nxs: " ++ show xs ++ "\nys: " ++ show ys
    -        ++ "\ntolerance: " ++ show tol
    -  where
    -      absDiff x y = abs (x - y)
    -      tol = 0.001 :: Float
    -
    - diff --git a/docs/haddock/tensorflow-test-0.1.0.0/src/hscolour.css b/docs/haddock/tensorflow-test-0.1.0.0/src/hscolour.css deleted file mode 100644 index c15919e..0000000 --- a/docs/haddock/tensorflow-test-0.1.0.0/src/hscolour.css +++ /dev/null @@ -1,5 +0,0 @@ -.hs-keyglyph, .hs-layout {color: red;} -.hs-keyword {color: blue;} -.hs-comment, .hs-comment a {color: green;} -.hs-str, .hs-chr {color: teal;} -.hs-keyword, .hs-conid, .hs-varid, .hs-conop, .hs-varop, .hs-num, .hs-cpp, .hs-sel, .hs-definition {}